system_32.h
#ifndef __ASM_SH_SYSTEM_32_H
#define __ASM_SH_SYSTEM_32_H

#include <linux/types.h>
#include <asm/mmu.h>
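
/*
 * Descriptive note (added): when CONFIG_SH_DSP is set, the 14-register DSP
 * file (a0/a1 plus their guard bits a0g/a1g, m0/m1, x0/x1, y0/y1, dsr, rs,
 * re, mod) is saved to and restored from tsk->thread.dsp_status across
 * context switches. is_dsp_enabled() tests the SR_DSP bit recorded in the
 * saved status word.
 */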
#ifdef CONFIG_SH_DSP

#define is_dsp_enabled(tsk) \
	(!!(tsk->thread.dsp_status.status & SR_DSP))

#define __restore_dsp(tsk) \
do { \
	register u32 *__ts2 __asm__ ("r2") = \
			(u32 *)&tsk->thread.dsp_status; \
	__asm__ __volatile__ ( \
		".balign 4\n\t" \
		"movs.l	@r2+, a0\n\t" \
		"movs.l	@r2+, a1\n\t" \
		"movs.l	@r2+, a0g\n\t" \
		"movs.l	@r2+, a1g\n\t" \
		"movs.l	@r2+, m0\n\t" \
		"movs.l	@r2+, m1\n\t" \
		"movs.l	@r2+, x0\n\t" \
		"movs.l	@r2+, x1\n\t" \
		"movs.l	@r2+, y0\n\t" \
		"movs.l	@r2+, y1\n\t" \
		"lds.l	@r2+, dsr\n\t" \
		"ldc.l	@r2+, rs\n\t" \
		"ldc.l	@r2+, re\n\t" \
		"ldc.l	@r2+, mod\n\t" \
		: : "r" (__ts2)); \
} while (0)
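
/*
 * Descriptive note (added): __save_dsp() points r2 just past the 14-word
 * save area and pushes the registers with pre-decrement addressing, i.e.
 * in the exact reverse of the order __restore_dsp() pops them back with
 * post-increment above.
 */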
#define __save_dsp(tsk) \
do { \
	register u32 *__ts2 __asm__ ("r2") = \
			(u32 *)&tsk->thread.dsp_status + 14; \
 \
	__asm__ __volatile__ ( \
		".balign 4\n\t" \
		"stc.l	mod, @-r2\n\t" \
		"stc.l	re, @-r2\n\t" \
		"stc.l	rs, @-r2\n\t" \
		"sts.l	dsr, @-r2\n\t" \
		"movs.l	y1, @-r2\n\t" \
		"movs.l	y0, @-r2\n\t" \
		"movs.l	x1, @-r2\n\t" \
		"movs.l	x0, @-r2\n\t" \
		"movs.l	m1, @-r2\n\t" \
		"movs.l	m0, @-r2\n\t" \
		"movs.l	a1g, @-r2\n\t" \
		"movs.l	a0g, @-r2\n\t" \
		"movs.l	a1, @-r2\n\t" \
		"movs.l	a0, @-r2\n\t" \
		: : "r" (__ts2)); \
} while (0)

#else

#define is_dsp_enabled(tsk)	(0)
#define __save_dsp(tsk)		do { } while (0)
#define __restore_dsp(tsk)	do { } while (0)
#endif
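
/*
 * Descriptive note (added): cache block operations. icbi (instruction
 * cache block invalidate) only exists on SH-4A, so __icbi() degrades to a
 * memory barrier elsewhere. The operand cache variants map directly onto
 * the SH-4 instructions: ocbp purges (writes back, then invalidates),
 * ocbi invalidates without write-back, and ocbwb writes back without
 * invalidating.
 */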
#if defined(CONFIG_CPU_SH4A)
#define __icbi(addr)	__asm__ __volatile__ ( "icbi @%0\n\t" : : "r" (addr))
#else
#define __icbi(addr)	mb()
#endif

#define __ocbp(addr)	__asm__ __volatile__ ( "ocbp @%0\n\t" : : "r" (addr))
#define __ocbi(addr)	__asm__ __volatile__ ( "ocbi @%0\n\t" : : "r" (addr))
#define __ocbwb(addr)	__asm__ __volatile__ ( "ocbwb @%0\n\t" : : "r" (addr))

struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next);

/*
 * switch_to() switches the CPU from task @prev to task @next; when @prev
 * is eventually switched back in, @last receives the task that was
 * running immediately beforehand.
 */
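/*
 * Descriptive note (added): r1, r2 and r4-r7 are pinned via register
 * variables -- r4/r5 carry prev/next as the argument registers for
 * __switch_to(), r1/r2 point at prev's saved SP/PC slots, r6 points at
 * next's saved SP and r7 holds next's saved PC. The asm pushes the
 * callee-saved state (gbr, pr, r8-r14) on the old stack, records SP and
 * a resume PC (label 1) in prev->thread, switches to next's stack, and
 * jumps to __switch_to() with pr preloaded so that it "returns" straight
 * into the new task. The "=z" constraint pins __last to r0, where
 * __switch_to() leaves its return value.
 */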
#define switch_to(prev, next, last) \
do { \
	register u32 *__ts1 __asm__ ("r1"); \
	register u32 *__ts2 __asm__ ("r2"); \
	register u32 *__ts4 __asm__ ("r4"); \
	register u32 *__ts5 __asm__ ("r5"); \
	register u32 *__ts6 __asm__ ("r6"); \
	register u32 __ts7 __asm__ ("r7"); \
	struct task_struct *__last; \
 \
	if (is_dsp_enabled(prev)) \
		__save_dsp(prev); \
 \
	__ts1 = (u32 *)&prev->thread.sp; \
	__ts2 = (u32 *)&prev->thread.pc; \
	__ts4 = (u32 *)prev; \
	__ts5 = (u32 *)next; \
	__ts6 = (u32 *)&next->thread.sp; \
	__ts7 = next->thread.pc; \
 \
	__asm__ __volatile__ ( \
		".balign 4\n\t" \
		"stc.l	gbr, @-r15\n\t" \
		"sts.l	pr, @-r15\n\t" \
		"mov.l	r8, @-r15\n\t" \
		"mov.l	r9, @-r15\n\t" \
		"mov.l	r10, @-r15\n\t" \
		"mov.l	r11, @-r15\n\t" \
		"mov.l	r12, @-r15\n\t" \
		"mov.l	r13, @-r15\n\t" \
		"mov.l	r14, @-r15\n\t" \
		"mov.l	r15, @r1\t! save SP\n\t" \
		"mov.l	@r6, r15\t! change to new stack\n\t" \
		"mova	1f, %0\n\t" \
		"mov.l	%0, @r2\t! save PC\n\t" \
		"mov.l	2f, %0\n\t" \
		"jmp	@%0\t! call __switch_to\n\t" \
		" lds	r7, pr\t! with return to new PC\n\t" \
		".balign 4\n" \
		"2:\n\t" \
		".long	__switch_to\n" \
		"1:\n\t" \
		"mov.l	@r15+, r14\n\t" \
		"mov.l	@r15+, r13\n\t" \
		"mov.l	@r15+, r12\n\t" \
		"mov.l	@r15+, r11\n\t" \
		"mov.l	@r15+, r10\n\t" \
		"mov.l	@r15+, r9\n\t" \
		"mov.l	@r15+, r8\n\t" \
		"lds.l	@r15+, pr\n\t" \
		"ldc.l	@r15+, gbr\n\t" \
		: "=z" (__last) \
		: "r" (__ts1), "r" (__ts2), "r" (__ts4), \
		  "r" (__ts5), "r" (__ts6), "r" (__ts7) \
		: "r3", "t"); \
 \
	last = __last; \
} while (0)
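
/*
 * Descriptive note (added): finish_arch_switch() runs once the switch has
 * completed; the DSP context is restored here, on the far side of the
 * switch, rather than inside switch_to() itself.
 */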
#define finish_arch_switch(prev) \
do { \
	if (is_dsp_enabled(prev)) \
		__restore_dsp(prev); \
} while (0)
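
/*
 * Descriptive note (added): fetch the exception vector that the low-level
 * exception entry code is expected to leave behind -- in the banked r2
 * (r2_bank) on CPUs with SR.RB register banks, otherwise in r4.
 */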
#ifdef CONFIG_CPU_HAS_SR_RB
#define lookup_exception_vector() \
({ \
	unsigned long _vec; \
 \
	__asm__ __volatile__ ( \
		"stc r2_bank, %0\n\t" \
		: "=r" (_vec) \
	); \
 \
	_vec; \
})
#else
#define lookup_exception_vector() \
({ \
	unsigned long _vec; \
	__asm__ __volatile__ ( \
		"mov r4, %0\n\t" \
		: "=r" (_vec) \
	); \
 \
	_vec; \
})
#endif
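
/*
 * Descriptive note (added): convert a pointer to a register-sized value;
 * the intermediate cast through signed long makes any widening
 * sign-preserving.
 */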
static inline reg_size_t register_align(void *val)
{
	return (unsigned long)(signed long)val;
}
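
/*
 * Descriptive note (added): software fixup for unaligned loads/stores;
 * the definition lives alongside the trap handlers (traps_32.c). The
 * unnamed int parameter is valid C in a declaration and is left as-is
 * here.
 */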
int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
			    struct mem_access *ma, int, unsigned long address);
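
/*
 * Descriptive note (added): force an address-error exception by loading
 * SR with only the BL bit (0x10000000) set and then performing a 32-bit
 * load from 0x80000001 -- a misaligned address, which the CPU faults on.
 */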
static inline void trigger_address_error(void)
{
	__asm__ __volatile__ (
		"ldc %0, sr\n\t"
		"mov.l @%1, %0"
		:
		: "r" (0x10000000), "r" (0x80000001)
	);
}
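
/*
 * Descriptive note (added): exception entry points called from the
 * low-level handlers; the r4-r7 arguments mirror the first four argument
 * registers at exception time, with the saved frame adjoining __regs on
 * the stack.
 */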
asmlinkage void do_address_error(struct pt_regs *regs,
				 unsigned long writeaccess,
				 unsigned long address);
asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
				unsigned long r6, unsigned long r7,
				struct pt_regs __regs);
asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
				 unsigned long r6, unsigned long r7,
				 struct pt_regs __regs);
asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
				     unsigned long r6, unsigned long r7,
				     struct pt_regs __regs);
asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
				   unsigned long r6, unsigned long r7,
				   struct pt_regs __regs);
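
/*
 * Descriptive note (added): SR.BL (bit 28) blocks exceptions and
 * interrupts while set. set_bl_bit() additionally clears the IMASK field
 * (the "and 0xffffff0f"); clear_bl_bit() reuses the second dummy output
 * as its input mask via the "1" matching constraint. A hypothetical
 * caller would bracket code that must not be disturbed:
 *
 *	set_bl_bit();
 *	... code that must run with exceptions blocked ...
 *	clear_bl_bit();
 */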
static inline void set_bl_bit(void)
{
	unsigned long __dummy0, __dummy1;

	__asm__ __volatile__ (
		"stc	sr, %0\n\t"
		"or	%2, %0\n\t"
		"and	%3, %0\n\t"
		"ldc	%0, sr\n\t"
		: "=&r" (__dummy0), "=r" (__dummy1)
		: "r" (0x10000000), "r" (0xffffff0f)
		: "memory"
	);
}

static inline void clear_bl_bit(void)
{
	unsigned long __dummy0, __dummy1;

	__asm__ __volatile__ (
		"stc	sr, %0\n\t"
		"and	%2, %0\n\t"
		"ldc	%0, sr\n\t"
		: "=&r" (__dummy0), "=r" (__dummy1)
		: "1" (~0x10000000)
		: "memory"
	);
}

#endif /* __ASM_SH_SYSTEM_32_H */