/* arch/sparc/kernel/rtrap_32.S */
  1. /*
  2. * rtrap.S: Return from Sparc trap low-level code.
  3. *
  4. * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
  5. */
  6. #include <asm/page.h>
  7. #include <asm/ptrace.h>
  8. #include <asm/psr.h>
  9. #include <asm/asi.h>
  10. #include <asm/smp.h>
  11. #include <asm/contregs.h>
  12. #include <asm/winmacro.h>
  13. #include <asm/asmmacro.h>
  14. #include <asm/thread_info.h>
  15. #define t_psr l0
  16. #define t_pc l1
  17. #define t_npc l2
  18. #define t_wim l3
  19. #define twin_tmp1 l4
  20. #define glob_tmp g4
  21. #define curptr g6
  22. /* 7 WINDOW SPARC PATCH INSTRUCTIONS */
  23. .globl rtrap_7win_patch1, rtrap_7win_patch2, rtrap_7win_patch3
  24. .globl rtrap_7win_patch4, rtrap_7win_patch5
  25. rtrap_7win_patch1: srl %t_wim, 0x6, %glob_tmp
  26. rtrap_7win_patch2: and %glob_tmp, 0x7f, %glob_tmp
  27. rtrap_7win_patch3: srl %g1, 7, %g2
  28. rtrap_7win_patch4: srl %g2, 6, %g2
  29. rtrap_7win_patch5: and %g1, 0x7f, %g1
  30. /* END OF PATCH INSTRUCTIONS */
  31. /* We need to check for a few things which are:
  32. * 1) The need to call schedule() because this
  33. * processes quantum is up.
  34. * 2) Pending signals for this process, if any
  35. * exist we need to call do_signal() to do
  36. * the needy.
  37. *
  38. * Else we just check if the rett would land us
  39. * in an invalid window, if so we need to grab
  40. * it off the user/kernel stack first.
  41. */
  42. .globl ret_trap_entry, rtrap_patch1, rtrap_patch2
  43. .globl rtrap_patch3, rtrap_patch4, rtrap_patch5
  44. .globl ret_trap_lockless_ipi
  45. ret_trap_entry:
  46. ret_trap_lockless_ipi:
  47. andcc %t_psr, PSR_PS, %g0
  48. sethi %hi(PSR_SYSCALL), %g1
  49. be 1f
  50. andn %t_psr, %g1, %t_psr
  51. wr %t_psr, 0x0, %psr
  52. b ret_trap_kernel
  53. nop
  54. 1:
  55. ld [%curptr + TI_FLAGS], %g2
  56. andcc %g2, (_TIF_NEED_RESCHED), %g0
  57. be signal_p
  58. nop
  59. call schedule
  60. nop
  61. ld [%curptr + TI_FLAGS], %g2
  62. signal_p:
  63. andcc %g2, _TIF_DO_NOTIFY_RESUME_MASK, %g0
  64. bz,a ret_trap_continue
  65. ld [%sp + STACKFRAME_SZ + PT_PSR], %t_psr
  66. mov %g2, %o2
  67. mov %l5, %o1
  68. call do_notify_resume
  69. add %sp, STACKFRAME_SZ, %o0 ! pt_regs ptr
  70. b signal_p
  71. ld [%curptr + TI_FLAGS], %g2
  72. ret_trap_continue:
  73. sethi %hi(PSR_SYSCALL), %g1
  74. andn %t_psr, %g1, %t_psr
  75. wr %t_psr, 0x0, %psr
  76. WRITE_PAUSE
  77. ld [%curptr + TI_W_SAVED], %twin_tmp1
  78. orcc %g0, %twin_tmp1, %g0
  79. be ret_trap_nobufwins
  80. nop
  81. wr %t_psr, PSR_ET, %psr
  82. WRITE_PAUSE
  83. mov 1, %o1
  84. call try_to_clear_window_buffer
  85. add %sp, STACKFRAME_SZ, %o0
  86. b signal_p
  87. ld [%curptr + TI_FLAGS], %g2
  88. ret_trap_nobufwins:
  89. /* Load up the user's out registers so we can pull
  90. * a window from the stack, if necessary.
  91. */
  92. LOAD_PT_INS(sp)
  93. /* If there are already live user windows in the
  94. * set we can return from trap safely.
  95. */
  96. ld [%curptr + TI_UWINMASK], %twin_tmp1
  97. orcc %g0, %twin_tmp1, %g0
  98. bne ret_trap_userwins_ok
  99. nop
  100. /* Calculate new %wim, we have to pull a register
  101. * window from the users stack.
  102. */
  103. ret_trap_pull_one_window:
  104. rd %wim, %t_wim
  105. sll %t_wim, 0x1, %twin_tmp1
  106. rtrap_patch1: srl %t_wim, 0x7, %glob_tmp
  107. or %glob_tmp, %twin_tmp1, %glob_tmp
  108. rtrap_patch2: and %glob_tmp, 0xff, %glob_tmp
  109. wr %glob_tmp, 0x0, %wim
  110. /* Here comes the architecture specific
  111. * branch to the user stack checking routine
  112. * for return from traps.
  113. */
  114. b srmmu_rett_stackchk
  115. andcc %fp, 0x7, %g0
  116. ret_trap_userwins_ok:
  117. LOAD_PT_PRIV(sp, t_psr, t_pc, t_npc)
  118. or %t_pc, %t_npc, %g2
  119. andcc %g2, 0x3, %g0
  120. sethi %hi(PSR_SYSCALL), %g2
  121. be 1f
  122. andn %t_psr, %g2, %t_psr
  123. b ret_trap_unaligned_pc
  124. add %sp, STACKFRAME_SZ, %o0
  125. 1:
  126. LOAD_PT_YREG(sp, g1)
  127. LOAD_PT_GLOBALS(sp)
  128. wr %t_psr, 0x0, %psr
  129. WRITE_PAUSE
  130. jmp %t_pc
  131. rett %t_npc
  132. ret_trap_unaligned_pc:
  133. ld [%sp + STACKFRAME_SZ + PT_PC], %o1
  134. ld [%sp + STACKFRAME_SZ + PT_NPC], %o2
  135. ld [%sp + STACKFRAME_SZ + PT_PSR], %o3
  136. wr %t_wim, 0x0, %wim ! or else...
  137. wr %t_psr, PSR_ET, %psr
  138. WRITE_PAUSE
  139. call do_memaccess_unaligned
  140. nop
  141. b signal_p
  142. ld [%curptr + TI_FLAGS], %g2
  143. ret_trap_kernel:
  144. /* Will the rett land us in the invalid window? */
  145. mov 2, %g1
  146. sll %g1, %t_psr, %g1
  147. rtrap_patch3: srl %g1, 8, %g2
  148. or %g1, %g2, %g1
  149. rd %wim, %g2
  150. andcc %g2, %g1, %g0
  151. be 1f ! Nope, just return from the trap
  152. sll %g2, 0x1, %g1
  153. /* We have to grab a window before returning. */
  154. rtrap_patch4: srl %g2, 7, %g2
  155. or %g1, %g2, %g1
  156. rtrap_patch5: and %g1, 0xff, %g1
  157. wr %g1, 0x0, %wim
  158. /* Grrr, make sure we load from the right %sp... */
  159. LOAD_PT_ALL(sp, t_psr, t_pc, t_npc, g1)
  160. restore %g0, %g0, %g0
  161. LOAD_WINDOW(sp)
  162. b 2f
  163. save %g0, %g0, %g0
  164. /* Reload the entire frame in case this is from a
  165. * kernel system call or whatever...
  166. */
  167. 1:
  168. LOAD_PT_ALL(sp, t_psr, t_pc, t_npc, g1)
  169. 2:
  170. sethi %hi(PSR_SYSCALL), %twin_tmp1
  171. andn %t_psr, %twin_tmp1, %t_psr
  172. wr %t_psr, 0x0, %psr
  173. WRITE_PAUSE
  174. jmp %t_pc
  175. rett %t_npc
  176. ret_trap_user_stack_is_bolixed:
  177. wr %t_wim, 0x0, %wim
  178. wr %t_psr, PSR_ET, %psr
  179. WRITE_PAUSE
  180. call window_ret_fault
  181. add %sp, STACKFRAME_SZ, %o0
  182. b signal_p
  183. ld [%curptr + TI_FLAGS], %g2
  184. .globl srmmu_rett_stackchk
  185. srmmu_rett_stackchk:
  186. bne ret_trap_user_stack_is_bolixed
  187. sethi %hi(PAGE_OFFSET), %g1
  188. cmp %g1, %fp
  189. bleu ret_trap_user_stack_is_bolixed
  190. mov AC_M_SFSR, %g1
  191. LEON_PI(lda [%g1] ASI_LEON_MMUREGS, %g0)
  192. SUN_PI_(lda [%g1] ASI_M_MMUREGS, %g0)
  193. LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %g1)
  194. SUN_PI_(lda [%g0] ASI_M_MMUREGS, %g1)
  195. or %g1, 0x2, %g1
  196. LEON_PI(sta %g1, [%g0] ASI_LEON_MMUREGS)
  197. SUN_PI_(sta %g1, [%g0] ASI_M_MMUREGS)
  198. restore %g0, %g0, %g0
  199. LOAD_WINDOW(sp)
  200. save %g0, %g0, %g0
  201. andn %g1, 0x2, %g1
  202. LEON_PI(sta %g1, [%g0] ASI_LEON_MMUREGS)
  203. SUN_PI_(sta %g1, [%g0] ASI_M_MMUREGS)
  204. mov AC_M_SFAR, %g2
  205. LEON_PI(lda [%g2] ASI_LEON_MMUREGS, %g2)
  206. SUN_PI_(lda [%g2] ASI_M_MMUREGS, %g2)
  207. mov AC_M_SFSR, %g1
  208. LEON_PI(lda [%g1] ASI_LEON_MMUREGS, %g1)
  209. SUN_PI_(lda [%g1] ASI_M_MMUREGS, %g1)
  210. andcc %g1, 0x2, %g0
  211. be ret_trap_userwins_ok
  212. nop
  213. b,a ret_trap_user_stack_is_bolixed