entry.S

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2001 MIPS Technologies, Inc.
 */

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/isadep.h>
#include <asm/thread_info.h>
#include <asm/war.h>
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif

#ifndef CONFIG_PREEMPT
#define resume_kernel   restore_all
#else
#define __ret_from_irq  ret_from_exception
#endif
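
/*
 * With CONFIG_PREEMPT disabled, a return to kernel mode never has to
 * reschedule, so resume_kernel is simply aliased to restore_all.  With
 * preemption enabled, __ret_from_irq becomes the ret_from_exception
 * entry point instead and the explicit "preempt stop" below is left out.
 */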

        .text
        .align  5
#ifndef CONFIG_PREEMPT
FEXPORT(ret_from_exception)
        local_irq_disable                       # preempt stop
        b       __ret_from_irq
#endif
FEXPORT(ret_from_irq)
        LONG_S  s0, TI_REGS($28)
FEXPORT(__ret_from_irq)
        LONG_L  t0, PT_STATUS(sp)               # returning to kernel mode?
        andi    t0, t0, KU_USER
        beqz    t0, resume_kernel

resume_userspace:
        local_irq_disable                       # make sure we don't miss an
                                                # interrupt setting need_resched
                                                # between sampling and return
        LONG_L  a2, TI_FLAGS($28)               # current->work
        andi    t0, a2, _TIF_WORK_MASK          # (ignoring syscall_trace)
        bnez    t0, work_pending
        j       restore_all
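
/*
 * Kernel preemption: only preempt when the thread_info preempt count is
 * zero, TIF_NEED_RESCHED is set and the interrupted context had interrupts
 * enabled (bit 0 of the saved Status word).  preempt_schedule_irq() is
 * entered with interrupts disabled, and need_resched is re-tested on
 * return in case it was raised again.
 */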
#ifdef CONFIG_PREEMPT
resume_kernel:
        local_irq_disable
        lw      t0, TI_PRE_COUNT($28)
        bnez    t0, restore_all
need_resched:
        LONG_L  t0, TI_FLAGS($28)
        andi    t1, t0, _TIF_NEED_RESCHED
        beqz    t1, restore_all
        LONG_L  t0, PT_STATUS(sp)               # Interrupts off?
        andi    t0, 1
        beqz    t0, restore_all
        jal     preempt_schedule_irq
        b       need_resched
#endif
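
/*
 * A freshly forked task resumes here after its first context switch:
 * schedule_tail() finishes the switch away from the previous task, and
 * the new task then leaves the kernel through the normal syscall exit
 * path below.
 */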
FEXPORT(ret_from_fork)
        jal     schedule_tail                   # a0 = struct task_struct *prev

FEXPORT(syscall_exit)
        local_irq_disable                       # make sure need_resched and
                                                # signals don't change between
                                                # sampling and return
        LONG_L  a2, TI_FLAGS($28)               # current->work
        li      t0, _TIF_ALLWORK_MASK
        and     t0, a2, t0
        bnez    t0, syscall_exit_work

FEXPORT(restore_all)                            # restore full frame
#ifdef CONFIG_MIPS_MT_SMTC
#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
/* Re-arm any temporarily masked interrupts not explicitly "acked" */
        mfc0    v0, CP0_TCSTATUS
        ori     v1, v0, TCSTATUS_IXMT
        mtc0    v1, CP0_TCSTATUS
        andi    v0, TCSTATUS_IXMT
        _ehb
        mfc0    t0, CP0_TCCONTEXT
        DMT     9                               # dmt t1
        jal     mips_ihb
        mfc0    t2, CP0_STATUS
        andi    t3, t0, 0xff00
        or      t2, t2, t3
        mtc0    t2, CP0_STATUS
        _ehb
        andi    t1, t1, VPECONTROL_TE
        beqz    t1, 1f
        EMT
1:
        mfc0    v1, CP0_TCSTATUS
        /* We set IXMT above, XOR should clear it here */
        xori    v1, v1, TCSTATUS_IXMT
        or      v1, v0, v1
        mtc0    v1, CP0_TCSTATUS
        _ehb
        xor     t0, t0, t3
        mtc0    t0, CP0_TCCONTEXT
#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
        /* Detect and execute deferred IPI "interrupts" */
        LONG_L  s0, TI_REGS($28)
        LONG_S  sp, TI_REGS($28)
        jal     deferred_smtc_ipi
        LONG_S  s0, TI_REGS($28)
#endif /* CONFIG_MIPS_MT_SMTC */
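
/*
 * Restore the register frame: restore_all rebuilds the full frame (the
 * temporary, $at and callee-saved registers) and falls into
 * restore_partial, which restores the rest and returns to the interrupted
 * context via RESTORE_SP_AND_RET.  The CONFIG_TRACE_IRQFLAGS block keeps
 * the hardirq tracing state in sync with the Status value about to be
 * restored.
 */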
        .set    noat
        RESTORE_TEMP
        RESTORE_AT
        RESTORE_STATIC
FEXPORT(restore_partial)                        # restore partial frame
#ifdef CONFIG_TRACE_IRQFLAGS
        SAVE_STATIC
        SAVE_AT
        SAVE_TEMP
        LONG_L  v0, PT_STATUS(sp)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
        and     v0, ST0_IEP
#else
        and     v0, ST0_IE
#endif
        beqz    v0, 1f
        jal     trace_hardirqs_on
        b       2f
1:      jal     trace_hardirqs_off
2:
        RESTORE_TEMP
        RESTORE_AT
        RESTORE_STATIC
#endif
        RESTORE_SOME
        RESTORE_SP_AND_RET
        .set    at
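
/*
 * Slow path back to user space: keep calling schedule() while
 * TIF_NEED_RESCHED stays set, hand any remaining work (pending signals,
 * notify-resume requests) to do_notify_resume(), and then retry the
 * resume_userspace checks.
 */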
work_pending:
        andi    t0, a2, _TIF_NEED_RESCHED       # a2 is preloaded with TI_FLAGS
        beqz    t0, work_notifysig
work_resched:
        jal     schedule

        local_irq_disable                       # make sure need_resched and
                                                # signals don't change between
                                                # sampling and return
        LONG_L  a2, TI_FLAGS($28)
        andi    t0, a2, _TIF_WORK_MASK          # is there any work to be done
                                                # other than syscall tracing?
        beqz    t0, restore_all
        andi    t0, a2, _TIF_NEED_RESCHED
        bnez    t0, work_resched

work_notifysig:                                 # deal with pending signals and
                                                # notify-resume requests
        move    a0, sp
        li      a1, 0
        jal     do_notify_resume                # a2 already loaded
        j       resume_userspace

FEXPORT(syscall_exit_work_partial)
        SAVE_STATIC
syscall_exit_work:
        li      t0, _TIF_WORK_SYSCALL_EXIT
        and     t0, a2                          # a2 is preloaded with TI_FLAGS
        beqz    t0, work_pending                # trace bit set?
        local_irq_enable                        # could let syscall_trace_leave()
                                                # call schedule() instead
        move    a0, sp
        jal     syscall_trace_leave
        b       resume_userspace

#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT)

/*
 * MIPS32R2 Instruction Hazard Barrier - must be called
 *
 * For C code use the inline version named instruction_hazard().
 */
LEAF(mips_ihb)
        .set    mips32r2
        jr.hb   ra
        nop
        END(mips_ihb)

#endif /* CONFIG_CPU_MIPSR2 or CONFIG_MIPS_MT */