/* -*- mode: asm -*-
 *
 * linux/arch/m68k/kernel/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file README.legal in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 */
/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 */
/*
 * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
 *               all pointers that used to be 'current' are now entry
 *               number 0 in the 'current_set' list.
 *
 *  6/05/00 RZ:  added writeback completion after return from sighandler
 *               for 68040
 */
#include <linux/linkage.h>
#include <asm/entry.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/traps.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>

.globl system_call, buserr, trap, resume
.globl sys_call_table
.globl sys_fork, sys_clone, sys_vfork
.globl ret_from_interrupt, bad_interrupt
.globl auto_irqhandler_fixup
.globl user_irqvec_fixup

.text
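/*
 * Bus error and generic trap entry points: save the full exception
 * frame, pick up the current task pointer and hand the saved frame
 * to the C handlers (buserr_c / trap_c) for decoding.
 */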
ENTRY(buserr)
        SAVE_ALL_INT
        GET_CURRENT(%d0)
        movel   %sp,%sp@-               | stack frame pointer argument
        bsrl    buserr_c
        addql   #4,%sp
        jra     .Lret_from_exception

ENTRY(trap)
        SAVE_ALL_INT
        GET_CURRENT(%d0)
        movel   %sp,%sp@-               | stack frame pointer argument
        bsrl    trap_c
        addql   #4,%sp
        jra     .Lret_from_exception

| After a fork we jump here directly from resume,
| so that %d1 contains the previous task
| schedule_tail now used regardless of CONFIG_SMP
ENTRY(ret_from_fork)
        movel   %d1,%sp@-
        jsr     schedule_tail
        addql   #4,%sp
        jra     .Lret_from_exception
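/*
 * Syscall tracing: do_trace_entry/do_trace_exit call syscall_trace()
 * around the actual system call when the trace flag is set.  The saved
 * %d0 is preloaded with -ENOSYS (needed for strace) before the tracer
 * runs, and the possibly rewritten syscall number is re-checked on return.
 */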
do_trace_entry:
        movel   #-ENOSYS,%sp@(PT_OFF_D0) | needed for strace
        subql   #4,%sp
        SAVE_SWITCH_STACK
        jbsr    syscall_trace
        RESTORE_SWITCH_STACK
        addql   #4,%sp
        movel   %sp@(PT_OFF_ORIG_D0),%d0
        cmpl    #NR_syscalls,%d0
        jcs     syscall
badsys:
        movel   #-ENOSYS,%sp@(PT_OFF_D0)
        jra     ret_from_syscall

do_trace_exit:
        subql   #4,%sp
        SAVE_SWITCH_STACK
        jbsr    syscall_trace
        RESTORE_SWITCH_STACK
        addql   #4,%sp
        jra     .Lret_from_exception

ENTRY(ret_from_signal)
        movel   %curptr@(TASK_STACK),%a1
        tstb    %a1@(TINFO_FLAGS+2)
        jge     1f
        jbsr    syscall_trace
1:      RESTORE_SWITCH_STACK
        addql   #4,%sp
/* on 68040 complete pending writebacks if any */
#ifdef CONFIG_M68040
        bfextu  %sp@(PT_OFF_FORMATVEC){#0,#4},%d0
        subql   #7,%d0                  | bus error frame ?
        jbne    1f
        movel   %sp,%sp@-
        jbsr    berr_040cleanup
        addql   #4,%sp
1:
#endif
        jra     .Lret_from_exception
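/*
 * System call entry: the syscall number arrives in %d0.  After the
 * user frame is saved we check the syscall-trace flag, range-check
 * %d0 against NR_syscalls and dispatch through sys_call_table; the
 * return value is written back into the saved %d0 slot of the frame.
 */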
ENTRY(system_call)
        SAVE_ALL_SYS

        GET_CURRENT(%d1)
        movel   %d1,%a1

        | save top of frame
        movel   %sp,%curptr@(TASK_THREAD+THREAD_ESP0)

        | syscall trace?
        tstb    %a1@(TINFO_FLAGS+2)
        jmi     do_trace_entry
        cmpl    #NR_syscalls,%d0
        jcc     badsys
syscall:
        jbsr    @(sys_call_table,%d0:l:4)@(0)
        movel   %d0,%sp@(PT_OFF_D0)     | save the return value
ret_from_syscall:
        |oriw   #0x0700,%sr
        movel   %curptr@(TASK_STACK),%a1
        movew   %a1@(TINFO_FLAGS+2),%d0
        jne     syscall_exit_work
1:      RESTORE_ALL

syscall_exit_work:
        btst    #5,%sp@(PT_OFF_SR)      | check if returning to kernel
        bnes    1b                      | if so, skip resched, signals
        lslw    #1,%d0
        jcs     do_trace_exit
        jmi     do_delayed_trace
        lslw    #8,%d0
        jmi     do_signal_return
        pea     resume_userspace
        jra     schedule
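/*
 * Common exception return path.  The test on the saved SR checks the
 * supervisor bit: if we are returning to kernel mode the reschedule
 * and signal checks are skipped, otherwise the thread_info work flags
 * are examined and any pending work handled before RESTORE_ALL.
 */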
ENTRY(ret_from_exception)
.Lret_from_exception:
        btst    #5,%sp@(PT_OFF_SR)      | check if returning to kernel
        bnes    1f                      | if so, skip resched, signals
        | only allow interrupts when we are really the last one on the
        | kernel stack, otherwise stack overflow can occur during
        | heavy interrupt load
        andw    #ALLOWINT,%sr

resume_userspace:
        movel   %curptr@(TASK_STACK),%a1
        moveb   %a1@(TINFO_FLAGS+3),%d0
        jne     exit_work
1:      RESTORE_ALL

exit_work:
        | save top of frame
        movel   %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
        lslb    #1,%d0
        jmi     do_signal_return
        pea     resume_userspace
        jra     schedule

do_signal_return:
        |andw   #ALLOWINT,%sr
        subql   #4,%sp                  | dummy return address
        SAVE_SWITCH_STACK
        pea     %sp@(SWITCH_STACK_SIZE)
        bsrl    do_signal
        addql   #4,%sp
        RESTORE_SWITCH_STACK
        addql   #4,%sp
        jbra    resume_userspace

do_delayed_trace:
        bclr    #7,%sp@(PT_OFF_SR)      | clear trace bit in SR
        pea     1                       | send SIGTRAP
        movel   %curptr,%sp@-
        pea     LSIGTRAP
        jbsr    send_sig
        addql   #8,%sp
        addql   #4,%sp
        jbra    resume_userspace
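/*
 * The interrupt handlers below raise the interrupt count kept in
 * thread_info (TINFO_PREEMPT) on entry and drop it on exit.  Only when
 * the count falls back to zero, and the interrupted context was running
 * with interrupts enabled, does ret_from_last_interrupt check for
 * pending softirqs before taking the normal exception return path.
 */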
/* This is the main interrupt handler for autovector interrupts */

ENTRY(auto_inthandler)
        SAVE_ALL_INT
        GET_CURRENT(%d0)
        movel   %d0,%a1
        addqb   #1,%a1@(TINFO_PREEMPT+1)
        | put exception # in d0
        bfextu  %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
        subw    #VEC_SPUR,%d0

        movel   %sp,%sp@-
        movel   %d0,%sp@-               | put vector # on stack
auto_irqhandler_fixup = . + 2
        jsr     do_IRQ                  | process the IRQ
        addql   #8,%sp                  | pop parameters off stack

ret_from_interrupt:
        movel   %curptr@(TASK_STACK),%a1
        subqb   #1,%a1@(TINFO_PREEMPT+1)
        jeq     ret_from_last_interrupt
2:      RESTORE_ALL

        ALIGN
ret_from_last_interrupt:
        moveq   #(~ALLOWINT>>8)&0xff,%d0
        andb    %sp@(PT_OFF_SR),%d0
        jne     2b

        /* check if we need to do software interrupts */
        tstl    irq_stat+CPUSTAT_SOFTIRQ_PENDING
        jeq     .Lret_from_exception
        pea     ret_from_exception
        jra     do_softirq

/* Handler for user defined interrupt vectors */

ENTRY(user_inthandler)
        SAVE_ALL_INT
        GET_CURRENT(%d0)
        movel   %d0,%a1
        addqb   #1,%a1@(TINFO_PREEMPT+1)
        | put exception # in d0
        bfextu  %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
user_irqvec_fixup = . + 2
        subw    #VEC_USER,%d0

        movel   %sp,%sp@-
        movel   %d0,%sp@-               | put vector # on stack
        jsr     do_IRQ                  | process the IRQ
        addql   #8,%sp                  | pop parameters off stack

        movel   %curptr@(TASK_STACK),%a1
        subqb   #1,%a1@(TINFO_PREEMPT+1)
        jeq     ret_from_last_interrupt
        RESTORE_ALL

/* Handler for uninitialized and spurious interrupts */

ENTRY(bad_inthandler)
        SAVE_ALL_INT
        GET_CURRENT(%d0)
        movel   %d0,%a1
        addqb   #1,%a1@(TINFO_PREEMPT+1)

        movel   %sp,%sp@-
        jsr     handle_badint
        addql   #4,%sp

        movel   %curptr@(TASK_STACK),%a1
        subqb   #1,%a1@(TINFO_PREEMPT+1)
        jeq     ret_from_last_interrupt
        RESTORE_ALL
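/*
 * The fork-family syscalls need the caller's complete register set,
 * so each wrapper saves the non-scratch registers with
 * SAVE_SWITCH_STACK and passes a pointer to the pt_regs frame above
 * them to the C implementation (m68k_fork, m68k_clone, m68k_vfork).
 */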
ENTRY(sys_fork)
        SAVE_SWITCH_STACK
        pea     %sp@(SWITCH_STACK_SIZE)
        jbsr    m68k_fork
        addql   #4,%sp
        RESTORE_SWITCH_STACK
        rts

ENTRY(sys_clone)
        SAVE_SWITCH_STACK
        pea     %sp@(SWITCH_STACK_SIZE)
        jbsr    m68k_clone
        addql   #4,%sp
        RESTORE_SWITCH_STACK
        rts

ENTRY(sys_vfork)
        SAVE_SWITCH_STACK
        pea     %sp@(SWITCH_STACK_SIZE)
        jbsr    m68k_vfork
        addql   #4,%sp
        RESTORE_SWITCH_STACK
        rts

ENTRY(sys_sigreturn)
        SAVE_SWITCH_STACK
        jbsr    do_sigreturn
        RESTORE_SWITCH_STACK
        rts

ENTRY(sys_rt_sigreturn)
        SAVE_SWITCH_STACK
        jbsr    do_rt_sigreturn
        RESTORE_SWITCH_STACK
        rts
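/*
 * resume is the low-level task switch: it saves the status register,
 * fs (sfc/dfc), usp, the non-scratch registers and the kernel stack
 * pointer of the outgoing task, plus the FPU state where present,
 * then loads the same state for the incoming task.  The outgoing task
 * is returned in %d1 for schedule_tail / ret_from_fork.
 */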
resume:
        /*
         * Beware - when entering resume, prev (the current task) is
         * in a0, next (the new task) is in a1, so don't change these
         * registers until their contents are no longer needed.
         */
        /* save sr */
        movew   %sr,%a0@(TASK_THREAD+THREAD_SR)

        /* save fs (sfc,%dfc) (may be pointing to kernel memory) */
        movec   %sfc,%d0
        movew   %d0,%a0@(TASK_THREAD+THREAD_FS)

        /* save usp */
        /* it is better to use a movel here instead of a movew 8*) */
        movec   %usp,%d0
        movel   %d0,%a0@(TASK_THREAD+THREAD_USP)

        /* save non-scratch registers on stack */
        SAVE_SWITCH_STACK

        /* save current kernel stack pointer */
        movel   %sp,%a0@(TASK_THREAD+THREAD_KSP)

        /* save floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
        tstl    m68k_fputype
        jeq     3f
#endif
        fsave   %a0@(TASK_THREAD+THREAD_FPSTATE)

#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
        btst    #3,m68k_cputype+3
        beqs    1f
#endif
        /* The 060 FPU keeps status in bits 15-8 of the first longword */
        tstb    %a0@(TASK_THREAD+THREAD_FPSTATE+2)
        jeq     3f
#if !defined(CPU_M68060_ONLY)
        jra     2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:      tstb    %a0@(TASK_THREAD+THREAD_FPSTATE)
        jeq     3f
#endif
2:      fmovemx %fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
        fmoveml %fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3:
#endif /* CONFIG_M68KFPU_EMU_ONLY */

        /* Return previous task in %d1 */
        movel   %curptr,%d1

        /* switch to new task (a1 contains new task) */
        movel   %a1,%curptr

        /* restore floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
        tstl    m68k_fputype
        jeq     4f
#endif
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
        btst    #3,m68k_cputype+3
        beqs    1f
#endif
        /* The 060 FPU keeps status in bits 15-8 of the first longword */
        tstb    %a1@(TASK_THREAD+THREAD_FPSTATE+2)
        jeq     3f
#if !defined(CPU_M68060_ONLY)
        jra     2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:      tstb    %a1@(TASK_THREAD+THREAD_FPSTATE)
        jeq     3f
#endif
2:      fmovemx %a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
        fmoveml %a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
3:      frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4:
#endif /* CONFIG_M68KFPU_EMU_ONLY */

        /* restore the kernel stack pointer */
        movel   %a1@(TASK_THREAD+THREAD_KSP),%sp

        /* restore non-scratch registers */
        RESTORE_SWITCH_STACK

        /* restore user stack pointer */
        movel   %a1@(TASK_THREAD+THREAD_USP),%a0
        movel   %a0,%usp

        /* restore fs (sfc,%dfc) */
        movew   %a1@(TASK_THREAD+THREAD_FS),%a0
        movec   %a0,%sfc
        movec   %a0,%dfc

        /* restore status register */
        movew   %a1@(TASK_THREAD+THREAD_SR),%sr

        rts