/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <asm/hazards.h>

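/*
 * The functions below implement the arch_local_irq_*() primitives that the
 * generic <linux/irqflags.h> wrappers expand to.  Kernel code normally goes
 * through the generic interface rather than calling these directly, roughly:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		ends up in arch_local_irq_save()
 *	... critical section ...
 *	local_irq_restore(flags);	ends up in arch_local_irq_restore()
 *
 * Each primitive is emitted once as an assembler macro so that the inline
 * C functions below reduce to a single macro invocation.
 */
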
__asm__(
	"	.macro	arch_local_irq_enable				\n"
	"	.set	push						\n"
	"	.set	reorder						\n"
	"	.set	noat						\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	$1, $2, 1	# SMTC - clear TCStatus.IXMT	\n"
	"	ori	$1, 0x400					\n"
	"	xori	$1, 0x400					\n"
	"	mtc0	$1, $2, 1					\n"
#elif defined(CONFIG_CPU_MIPSR2)
	"	ei							\n"
#else
	"	mfc0	$1,$12						\n"
	"	ori	$1,0x1f						\n"
	"	xori	$1,0x1e						\n"
	"	mtc0	$1,$12						\n"
#endif
	"	irq_enable_hazard					\n"
	"	.set	pop						\n"
	"	.endm");

extern void smtc_ipi_replay(void);

static inline void arch_local_irq_enable(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC kernel needs to do a software replay of queued
	 * IPIs, at the cost of call overhead on each local_irq_enable()
	 */
	smtc_ipi_replay();
#endif
	__asm__ __volatile__(
		"arch_local_irq_enable"
		: /* no outputs */
		: /* no inputs */
		: "memory");
}

/*
 * For cli() we have to insert nops to make sure that the new value
 * has actually arrived in the status register before the end of this
 * macro.
 * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
 * no nops at all.
 */
/*
 * For TX49, toggling only the IE bit is not enough.
 *
 * If mfc0 $12 follows a store and the mfc0 is the last instruction of a
 * page and fetching the next instruction causes a TLB miss, the result
 * of the mfc0 might wrongly have the EXL bit set.
 *
 * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
 *
 * Workaround: mask the EXL bit of the result or place a nop before mfc0.
 */
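/*
 * The non-MIPSR2 paths in this file effectively apply the "mask EXL"
 * workaround after the fact: arch_local_irq_restore() masks a saved flags
 * word down to the IE bit before writing it back, and
 * arch_irqs_disabled_flags() only ever tests bit 0, so a spuriously set
 * EXL bit in a saved flags value is never acted upon.
 */
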
__asm__(
	"	.macro	arch_local_irq_disable\n"
	"	.set	push						\n"
	"	.set	noat						\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	$1, $2, 1					\n"
	"	ori	$1, 0x400					\n"
	"	.set	noreorder					\n"
	"	mtc0	$1, $2, 1					\n"
#elif defined(CONFIG_CPU_MIPSR2)
	"	di							\n"
#else
	"	mfc0	$1,$12						\n"
	"	ori	$1,0x1f						\n"
	"	xori	$1,0x1f						\n"
	"	.set	noreorder					\n"
	"	mtc0	$1,$12						\n"
#endif
	"	irq_disable_hazard					\n"
	"	.set	pop						\n"
	"	.endm							\n");

static inline void arch_local_irq_disable(void)
{
	__asm__ __volatile__(
		"arch_local_irq_disable"
		: /* no outputs */
		: /* no inputs */
		: "memory");
}

__asm__(
	"	.macro	arch_local_save_flags flags			\n"
	"	.set	push						\n"
	"	.set	reorder						\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	\\flags, $2, 1					\n"
#else
	"	mfc0	\\flags, $12					\n"
#endif
	"	.set	pop						\n"
	"	.endm							\n");

static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;

	asm volatile("arch_local_save_flags %0" : "=r" (flags));

	return flags;
}

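/*
 * Illustrative only: the generic irqs_disabled() test essentially boils
 * down to
 *
 *	unsigned long flags = arch_local_save_flags();
 *	int disabled = arch_irqs_disabled_flags(flags);
 *
 * i.e. "Status.IE clear" on ordinary configurations, or "TCStatus.IXMT set"
 * on SMTC (see arch_irqs_disabled_flags() below).
 */
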
__asm__(
	"	.macro	arch_local_irq_save result			\n"
	"	.set	push						\n"
	"	.set	reorder						\n"
	"	.set	noat						\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	\\result, $2, 1					\n"
	"	ori	$1, \\result, 0x400				\n"
	"	.set	noreorder					\n"
	"	mtc0	$1, $2, 1					\n"
	"	andi	\\result, \\result, 0x400			\n"
#elif defined(CONFIG_CPU_MIPSR2)
	"	di	\\result					\n"
	"	andi	\\result, 1					\n"
#else
	"	mfc0	\\result, $12					\n"
	"	ori	$1, \\result, 0x1f				\n"
	"	xori	$1, 0x1f					\n"
	"	.set	noreorder					\n"
	"	mtc0	$1, $12						\n"
#endif
	"	irq_disable_hazard					\n"
	"	.set	pop						\n"
	"	.endm							\n");

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags;

	asm volatile("arch_local_irq_save\t%0"
		     : "=r" (flags)
		     : /* no inputs */
		     : "memory");

	return flags;
}

__asm__(
	"	.macro	arch_local_irq_restore flags			\n"
	"	.set	push						\n"
	"	.set	noreorder					\n"
	"	.set	noat						\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	$1, $2, 1					\n"
	"	andi	\\flags, 0x400					\n"
	"	ori	$1, 0x400					\n"
	"	xori	$1, 0x400					\n"
	"	or	\\flags, $1					\n"
	"	mtc0	\\flags, $2, 1					\n"
#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
	/*
	 * Slow, but doesn't suffer from a relatively unlikely race
	 * condition we've had since day 1.
	 */
	"	beqz	\\flags, 1f					\n"
	"	di							\n"
	"	ei							\n"
	"1:								\n"
#elif defined(CONFIG_CPU_MIPSR2)
	/*
	 * Fast, dangerous.  Life is fun, life is good.
	 */
	"	mfc0	$1, $12						\n"
	"	ins	$1, \\flags, 0, 1				\n"
	"	mtc0	$1, $12						\n"
#else
	"	mfc0	$1, $12						\n"
	"	andi	\\flags, 1					\n"
	"	ori	$1, 0x1f					\n"
	"	xori	$1, 0x1f					\n"
	"	or	\\flags, $1					\n"
	"	mtc0	\\flags, $12					\n"
#endif
	"	irq_disable_hazard					\n"
	"	.set	pop						\n"
	"	.endm							\n");

static inline void arch_local_irq_restore(unsigned long flags)
{
	unsigned long __tmp1;

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC kernel needs to do a software replay of queued
	 * IPIs, at the cost of branch and call overhead on each
	 * local_irq_restore()
	 */
	if (unlikely(!(flags & 0x0400)))
		smtc_ipi_replay();
#endif
	__asm__ __volatile__(
		"arch_local_irq_restore\t%0"
		: "=r" (__tmp1)
		: "0" (flags)
		: "memory");
}

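/*
 * __arch_local_irq_restore() is identical to arch_local_irq_restore()
 * except that it never replays queued SMTC IPIs; presumably this is what
 * the SMTC code itself uses to avoid recursing into the replay path.
 */
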
static inline void __arch_local_irq_restore(unsigned long flags)
{
	unsigned long __tmp1;

	__asm__ __volatile__(
		"arch_local_irq_restore\t%0"
		: "=r" (__tmp1)
		: "0" (flags)
		: "memory");
}

static inline int arch_irqs_disabled_flags(unsigned long flags)
{
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC model uses TCStatus.IXMT to disable interrupts for a thread/CPU
	 */
	return flags & 0x400;
#else
	return !(flags & 1);
#endif
}

#endif /* #ifndef __ASSEMBLY__ */

/*
 * Do the CPU's IRQ-state tracing from assembly code.
 */
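/*
 * These macros are meant to be used from assembler (for instance in the
 * low-level exception entry/return paths) so that lockdep's view of the
 * hardirq state stays in sync with the real interrupt enable state.  The
 * *_RELOAD variant is for call sites that still need the argument/result
 * registers after tracing: trace_hardirqs_on() clobbers them, so
 * TRACE_IRQS_RELOAD_REGS restores $2 and $4-$7 (plus $8-$11 on 64-bit)
 * from the saved pt_regs on the kernel stack.
 */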
#ifdef CONFIG_TRACE_IRQFLAGS
/* Reload some registers clobbered by trace_hardirqs_on */
#ifdef CONFIG_64BIT
# define TRACE_IRQS_RELOAD_REGS						\
	LONG_L	$11, PT_R11(sp);					\
	LONG_L	$10, PT_R10(sp);					\
	LONG_L	$9, PT_R9(sp);						\
	LONG_L	$8, PT_R8(sp);						\
	LONG_L	$7, PT_R7(sp);						\
	LONG_L	$6, PT_R6(sp);						\
	LONG_L	$5, PT_R5(sp);						\
	LONG_L	$4, PT_R4(sp);						\
	LONG_L	$2, PT_R2(sp)
#else
# define TRACE_IRQS_RELOAD_REGS						\
	LONG_L	$7, PT_R7(sp);						\
	LONG_L	$6, PT_R6(sp);						\
	LONG_L	$5, PT_R5(sp);						\
	LONG_L	$4, PT_R4(sp);						\
	LONG_L	$2, PT_R2(sp)
#endif
# define TRACE_IRQS_ON							\
	CLI;	/* make sure trace_hardirqs_on() is called in kernel level */ \
	jal	trace_hardirqs_on
# define TRACE_IRQS_ON_RELOAD						\
	TRACE_IRQS_ON;							\
	TRACE_IRQS_RELOAD_REGS
# define TRACE_IRQS_OFF							\
	jal	trace_hardirqs_off
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_ON_RELOAD
# define TRACE_IRQS_OFF
#endif

#endif /* _ASM_IRQFLAGS_H */