/* arch/arm/include/asm/irqflags.h — CPU interrupt (IRQ/FIQ) mask handling */
  1. #ifndef __ASM_ARM_IRQFLAGS_H
  2. #define __ASM_ARM_IRQFLAGS_H
  3. #ifdef __KERNEL__
  4. #include <asm/ptrace.h>
/*
 * CPU interrupt mask handling.
 */
  8. #if __LINUX_ARM_ARCH__ >= 6
/*
 * Save the current CPSR and disable IRQs (ARMv6+: single CPSID insn).
 * Returns the pre-disable CPSR value for use with arch_local_irq_restore().
 * "memory" clobber orders the disable against surrounding memory accesses.
 */
static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags;

	asm volatile(
		" mrs %0, cpsr @ arch_local_irq_save\n"
		" cpsid i"
		: "=r" (flags) : : "memory", "cc");
	return flags;
}
/*
 * Unconditionally enable IRQs: CPSIE clears the CPSR I bit directly,
 * no read-modify-write of CPSR needed on ARMv6+.
 */
static inline void arch_local_irq_enable(void)
{
	asm volatile(
		" cpsie i @ arch_local_irq_enable"
		:
		:
		: "memory", "cc");
}
/*
 * Unconditionally disable IRQs: CPSID sets the CPSR I bit directly,
 * no read-modify-write of CPSR needed on ARMv6+.
 */
static inline void arch_local_irq_disable(void)
{
	asm volatile(
		" cpsid i @ arch_local_irq_disable"
		:
		:
		: "memory", "cc");
}
/* Enable/disable FIQs (ARMv6+): CPSIE/CPSID act on the CPSR F bit. */
#define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc")
#define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc")
  36. #else
/*
 * Save the current interrupt enable state & disable IRQs.
 * Pre-ARMv6: read CPSR, set the I bit (#128 == 0x80) in a scratch
 * register and write the control field back; the original CPSR value
 * is returned as the flags word.
 */
static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags, temp;

	asm volatile(
		" mrs %0, cpsr @ arch_local_irq_save\n"
		" orr %1, %0, #128\n"
		" msr cpsr_c, %1"
		: "=r" (flags), "=r" (temp)
		:
		: "memory", "cc");
	return flags;
}
/*
 * Enable IRQs.
 * Pre-ARMv6: read-modify-write of CPSR clearing the I bit (#128 == 0x80).
 * Note this RMW sequence is not atomic against FIQ handlers that modify
 * the CPSR between the mrs and msr.
 */
static inline void arch_local_irq_enable(void)
{
	unsigned long temp;

	asm volatile(
		" mrs %0, cpsr @ arch_local_irq_enable\n"
		" bic %0, %0, #128\n"
		" msr cpsr_c, %0"
		: "=r" (temp)
		:
		: "memory", "cc");
}
/*
 * Disable IRQs.
 * Pre-ARMv6: read-modify-write of CPSR setting the I bit (#128 == 0x80).
 */
static inline void arch_local_irq_disable(void)
{
	unsigned long temp;

	asm volatile(
		" mrs %0, cpsr @ arch_local_irq_disable\n"
		" orr %0, %0, #128\n"
		" msr cpsr_c, %0"
		: "=r" (temp)
		:
		: "memory", "cc");
}
/*
 * Enable FIQs.
 * Pre-ARMv6: read CPSR, clear the F bit (#64 == 0x40) and write the
 * control field back.  Written as a GNU statement expression so the
 * 'temp' scratch variable stays local to each expansion.
 */
#define local_fiq_enable()					\
	({							\
		unsigned long temp;				\
		__asm__ __volatile__(				\
		"mrs %0, cpsr @ stf\n"				\
		" bic %0, %0, #64\n"				\
		" msr cpsr_c, %0"				\
		: "=r" (temp)					\
		:						\
		: "memory", "cc");				\
	})
/*
 * Disable FIQs.
 * Pre-ARMv6: read CPSR, set the F bit (#64 == 0x40) and write the
 * control field back.  Statement expression keeps 'temp' local to
 * each expansion.
 */
#define local_fiq_disable()					\
	({							\
		unsigned long temp;				\
		__asm__ __volatile__(				\
		"mrs %0, cpsr @ clf\n"				\
		" orr %0, %0, #64\n"				\
		" msr cpsr_c, %0"				\
		: "=r" (temp)					\
		:						\
		: "memory", "cc");				\
	})
  108. #endif
/*
 * Save the current interrupt enable state.
 * Reads CPSR without modifying it; the returned word can be tested
 * with arch_irqs_disabled_flags() or passed to arch_local_irq_restore().
 */
static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;

	asm volatile(
		" mrs %0, cpsr @ local_save_flags"
		: "=r" (flags) : : "memory", "cc");
	return flags;
}
/*
 * restore saved IRQ & FIQ state
 * Writes only the CPSR control field (cpsr_c), so the condition flags
 * are left untouched; the I and F mask bits come from 'flags'.
 */
static inline void arch_local_irq_restore(unsigned long flags)
{
	asm volatile(
		" msr cpsr_c, %0 @ local_irq_restore"
		:
		: "r" (flags)
		: "memory", "cc");
}
  131. static inline int arch_irqs_disabled_flags(unsigned long flags)
  132. {
  133. return flags & PSR_I_BIT;
  134. }
  135. #endif
  136. #endif