/*
 * etrap.S: Sparc trap window preparation for entry into the
 *          Linux kernel.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 */
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/contregs.h>
#include <asm/page.h>
#include <asm/psr.h>
#include <asm/ptrace.h>
#include <asm/winmacro.h>
#include <asm/asmmacro.h>
#include <asm/thread_info.h>
  16. /* Registers to not touch at all. */
  17. #define t_psr l0 /* Set by caller */
  18. #define t_pc l1 /* Set by caller */
  19. #define t_npc l2 /* Set by caller */
  20. #define t_wim l3 /* Set by caller */
  21. #define t_twinmask l4 /* Set at beginning of this entry routine. */
  22. #define t_kstack l5 /* Set right before pt_regs frame is built */
  23. #define t_retpc l6 /* If you change this, change winmacro.h header file */
  24. #define t_systable l7 /* Never touch this, could be the syscall table ptr. */
  25. #define curptr g6 /* Set after pt_regs frame is built */
  26. .text
  27. .align 4
  28. /* SEVEN WINDOW PATCH INSTRUCTIONS */
  29. .globl tsetup_7win_patch1, tsetup_7win_patch2
  30. .globl tsetup_7win_patch3, tsetup_7win_patch4
  31. .globl tsetup_7win_patch5, tsetup_7win_patch6
  32. tsetup_7win_patch1: sll %t_wim, 0x6, %t_wim
  33. tsetup_7win_patch2: and %g2, 0x7f, %g2
  34. tsetup_7win_patch3: and %g2, 0x7f, %g2
  35. tsetup_7win_patch4: and %g1, 0x7f, %g1
  36. tsetup_7win_patch5: sll %t_wim, 0x6, %t_wim
  37. tsetup_7win_patch6: and %g2, 0x7f, %g2
  38. /* END OF PATCH INSTRUCTIONS */
  39. /* At trap time, interrupts and all generic traps do the
  40. * following:
  41. *
  42. * rd %psr, %l0
  43. * b some_handler
  44. * rd %wim, %l3
  45. * nop
  46. *
  47. * Then 'some_handler' if it needs a trap frame (ie. it has
  48. * to call c-code and the trap cannot be handled in-window)
  49. * then it does the SAVE_ALL macro in entry.S which does
  50. *
  51. * sethi %hi(trap_setup), %l4
  52. * jmpl %l4 + %lo(trap_setup), %l6
  53. * nop
  54. */
  55. /* 2 3 4 window number
  56. * -----
  57. * O T S mnemonic
  58. *
  59. * O == Current window before trap
  60. * T == Window entered when trap occurred
  61. * S == Window we will need to save if (1<<T) == %wim
  62. *
  63. * Before execution gets here, it must be guaranteed that
  64. * %l0 contains trap time %psr, %l1 and %l2 contain the
  65. * trap pc and npc, and %l3 contains the trap time %wim.
  66. */
  67. .globl trap_setup, tsetup_patch1, tsetup_patch2
  68. .globl tsetup_patch3, tsetup_patch4
  69. .globl tsetup_patch5, tsetup_patch6
  70. trap_setup:
  71. /* Calculate mask of trap window. See if from user
  72. * or kernel and branch conditionally.
  73. */
  74. mov 1, %t_twinmask
  75. andcc %t_psr, PSR_PS, %g0 ! fromsupv_p = (psr & PSR_PS)
  76. be trap_setup_from_user ! nope, from user mode
  77. sll %t_twinmask, %t_psr, %t_twinmask ! t_twinmask = (1 << psr)
  78. /* From kernel, allocate more kernel stack and
  79. * build a pt_regs trap frame.
  80. */
  81. sub %fp, (STACKFRAME_SZ + TRACEREG_SZ), %t_kstack
  82. STORE_PT_ALL(t_kstack, t_psr, t_pc, t_npc, g2)
  83. /* See if we are in the trap window. */
  84. andcc %t_twinmask, %t_wim, %g0
  85. bne trap_setup_kernel_spill ! in trap window, clean up
  86. nop
  87. /* Trap from kernel with a window available.
  88. * Just do it...
  89. */
  90. jmpl %t_retpc + 0x8, %g0 ! return to caller
  91. mov %t_kstack, %sp ! jump onto new stack
  92. trap_setup_kernel_spill:
  93. ld [%curptr + TI_UWINMASK], %g1
  94. orcc %g0, %g1, %g0
  95. bne trap_setup_user_spill ! there are some user windows, yuck
  96. /* Spill from kernel, but only kernel windows, adjust
  97. * %wim and go.
  98. */
  99. srl %t_wim, 0x1, %g2 ! begin computation of new %wim
  100. tsetup_patch1:
  101. sll %t_wim, 0x7, %t_wim ! patched on 7 window Sparcs
  102. or %t_wim, %g2, %g2
  103. tsetup_patch2:
  104. and %g2, 0xff, %g2 ! patched on 7 window Sparcs
  105. save %g0, %g0, %g0
  106. /* Set new %wim value */
  107. wr %g2, 0x0, %wim
  108. /* Save the kernel window onto the corresponding stack. */
  109. STORE_WINDOW(sp)
  110. restore %g0, %g0, %g0
  111. jmpl %t_retpc + 0x8, %g0 ! return to caller
  112. mov %t_kstack, %sp ! and onto new kernel stack
  113. #define STACK_OFFSET (THREAD_SIZE - TRACEREG_SZ - STACKFRAME_SZ)
  114. trap_setup_from_user:
  115. /* We can't use %curptr yet. */
  116. LOAD_CURRENT(t_kstack, t_twinmask)
  117. sethi %hi(STACK_OFFSET), %t_twinmask
  118. or %t_twinmask, %lo(STACK_OFFSET), %t_twinmask
  119. add %t_kstack, %t_twinmask, %t_kstack
  120. mov 1, %t_twinmask
  121. sll %t_twinmask, %t_psr, %t_twinmask ! t_twinmask = (1 << psr)
  122. /* Build pt_regs frame. */
  123. STORE_PT_ALL(t_kstack, t_psr, t_pc, t_npc, g2)
  124. #if 0
  125. /* If we're sure every task_struct is THREAD_SIZE aligned,
  126. we can speed this up. */
  127. sethi %hi(STACK_OFFSET), %curptr
  128. or %curptr, %lo(STACK_OFFSET), %curptr
  129. sub %t_kstack, %curptr, %curptr
  130. #else
  131. sethi %hi(~(THREAD_SIZE - 1)), %curptr
  132. and %t_kstack, %curptr, %curptr
  133. #endif
  134. /* Clear current_thread_info->w_saved */
  135. st %g0, [%curptr + TI_W_SAVED]
  136. /* See if we are in the trap window. */
  137. andcc %t_twinmask, %t_wim, %g0
  138. bne trap_setup_user_spill ! yep we are
  139. orn %g0, %t_twinmask, %g1 ! negate trap win mask into %g1
  140. /* Trap from user, but not into the invalid window.
  141. * Calculate new umask. The way this works is,
  142. * any window from the %wim at trap time until
  143. * the window right before the one we are in now,
  144. * is a user window. A diagram:
  145. *
  146. * 7 6 5 4 3 2 1 0 window number
  147. * ---------------
  148. * I L T mnemonic
  149. *
  150. * Window 'I' is the invalid window in our example,
  151. * window 'L' is the window the user was in when
  152. * the trap occurred, window T is the trap window
  153. * we are in now. So therefore, windows 5, 4 and
  154. * 3 are user windows. The following sequence
  155. * computes the user winmask to represent this.
  156. */
  157. subcc %t_wim, %t_twinmask, %g2
  158. bneg,a 1f
  159. sub %g2, 0x1, %g2
  160. 1:
  161. andn %g2, %t_twinmask, %g2
  162. tsetup_patch3:
  163. and %g2, 0xff, %g2 ! patched on 7win Sparcs
  164. st %g2, [%curptr + TI_UWINMASK] ! store new umask
  165. jmpl %t_retpc + 0x8, %g0 ! return to caller
  166. mov %t_kstack, %sp ! and onto kernel stack
  167. trap_setup_user_spill:
  168. /* A spill occurred from either kernel or user mode
  169. * and there exist some user windows to deal with.
  170. * A mask of the currently valid user windows
  171. * is in %g1 upon entry to here.
  172. */
  173. tsetup_patch4:
  174. and %g1, 0xff, %g1 ! patched on 7win Sparcs, mask
  175. srl %t_wim, 0x1, %g2 ! compute new %wim
  176. tsetup_patch5:
  177. sll %t_wim, 0x7, %t_wim ! patched on 7win Sparcs
  178. or %t_wim, %g2, %g2 ! %g2 is new %wim
  179. tsetup_patch6:
  180. and %g2, 0xff, %g2 ! patched on 7win Sparcs
  181. andn %g1, %g2, %g1 ! clear this bit in %g1
  182. st %g1, [%curptr + TI_UWINMASK]
  183. save %g0, %g0, %g0
  184. wr %g2, 0x0, %wim
  185. /* Call MMU-architecture dependent stack checking
  186. * routine.
  187. */
  188. .globl tsetup_mmu_patchme
  189. tsetup_mmu_patchme:
  190. b tsetup_sun4c_stackchk
  191. andcc %sp, 0x7, %g0
  192. /* Architecture specific stack checking routines. When either
  193. * of these routines are called, the globals are free to use
  194. * as they have been safely stashed on the new kernel stack
  195. * pointer. Thus the definition below for simplicity.
  196. */
  197. #define glob_tmp g1
  198. tsetup_sun4c_stackchk:
  199. /* Done by caller: andcc %sp, 0x7, %g0 */
  200. bne trap_setup_user_stack_is_bolixed
  201. sra %sp, 29, %glob_tmp
  202. add %glob_tmp, 0x1, %glob_tmp
  203. andncc %glob_tmp, 0x1, %g0
  204. bne trap_setup_user_stack_is_bolixed
  205. and %sp, 0xfff, %glob_tmp ! delay slot
  206. /* See if our dump area will be on more than one
  207. * page.
  208. */
  209. add %glob_tmp, 0x38, %glob_tmp
  210. andncc %glob_tmp, 0xff8, %g0
  211. be tsetup_sun4c_onepage ! only one page to check
  212. lda [%sp] ASI_PTE, %glob_tmp ! have to check first page anyways
  213. tsetup_sun4c_twopages:
  214. /* Is first page ok permission wise? */
  215. srl %glob_tmp, 29, %glob_tmp
  216. cmp %glob_tmp, 0x6
  217. bne trap_setup_user_stack_is_bolixed
  218. add %sp, 0x38, %glob_tmp /* Is second page in vma hole? */
  219. sra %glob_tmp, 29, %glob_tmp
  220. add %glob_tmp, 0x1, %glob_tmp
  221. andncc %glob_tmp, 0x1, %g0
  222. bne trap_setup_user_stack_is_bolixed
  223. add %sp, 0x38, %glob_tmp
  224. lda [%glob_tmp] ASI_PTE, %glob_tmp
  225. tsetup_sun4c_onepage:
  226. srl %glob_tmp, 29, %glob_tmp
  227. cmp %glob_tmp, 0x6 ! can user write to it?
  228. bne trap_setup_user_stack_is_bolixed ! failure
  229. nop
  230. STORE_WINDOW(sp)
  231. restore %g0, %g0, %g0
  232. jmpl %t_retpc + 0x8, %g0
  233. mov %t_kstack, %sp
  234. .globl tsetup_srmmu_stackchk
  235. tsetup_srmmu_stackchk:
  236. /* Check results of callers andcc %sp, 0x7, %g0 */
  237. bne trap_setup_user_stack_is_bolixed
  238. sethi %hi(PAGE_OFFSET), %glob_tmp
  239. cmp %glob_tmp, %sp
  240. bleu,a 1f
  241. lda [%g0] ASI_M_MMUREGS, %glob_tmp ! read MMU control
  242. trap_setup_user_stack_is_bolixed:
  243. /* From user/kernel into invalid window w/bad user
  244. * stack. Save bad user stack, and return to caller.
  245. */
  246. SAVE_BOLIXED_USER_STACK(curptr, g3)
  247. restore %g0, %g0, %g0
  248. jmpl %t_retpc + 0x8, %g0
  249. mov %t_kstack, %sp
  250. 1:
  251. /* Clear the fault status and turn on the no_fault bit. */
  252. or %glob_tmp, 0x2, %glob_tmp ! or in no_fault bit
  253. sta %glob_tmp, [%g0] ASI_M_MMUREGS ! set it
  254. /* Dump the registers and cross fingers. */
  255. STORE_WINDOW(sp)
  256. /* Clear the no_fault bit and check the status. */
  257. andn %glob_tmp, 0x2, %glob_tmp
  258. sta %glob_tmp, [%g0] ASI_M_MMUREGS
  259. mov AC_M_SFAR, %glob_tmp
  260. lda [%glob_tmp] ASI_M_MMUREGS, %g0
  261. mov AC_M_SFSR, %glob_tmp
  262. lda [%glob_tmp] ASI_M_MMUREGS, %glob_tmp ! save away status of winstore
  263. andcc %glob_tmp, 0x2, %g0 ! did we fault?
  264. bne trap_setup_user_stack_is_bolixed ! failure
  265. nop
  266. restore %g0, %g0, %g0
  267. jmpl %t_retpc + 0x8, %g0
  268. mov %t_kstack, %sp