/*
 *
 * linux/arch/h8300/kernel/entry.S
 *
 * Yoshinori Sato <ysato@users.sourceforge.jp>
 * David McCullough <davidm@snapgear.com>
 *
 */

/*
 * entry.S
 * include exception/interrupt gateway
 * system call entry
 */
#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
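
/*
 * Per-CPU helper macros.  The H8/300H lacks multi-bit shifts and the
 * stm/ldm instructions, so SHLL2/SHLR2 and SAVEREGS/RESTOREREGS are
 * open-coded; the H8S versions use the wider forms and additionally
 * save/restore the EXR register.  USERRET (and USEREXR on the H8S)
 * appear to be the offsets, from the user stack pointer recorded in
 * _sw_usp, of the exception frame pushed by the CPU.
 */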
#if defined(CONFIG_CPU_H8300H)
#define USERRET 8
INTERRUPTS = 64
        .h8300h
        .macro SHLL2 reg
        shll.l  \reg
        shll.l  \reg
        .endm
        .macro SHLR2 reg
        shlr.l  \reg
        shlr.l  \reg
        .endm
        .macro SAVEREGS
        mov.l   er0,@-sp
        mov.l   er1,@-sp
        mov.l   er2,@-sp
        mov.l   er3,@-sp
        .endm
        .macro RESTOREREGS
        mov.l   @sp+,er3
        mov.l   @sp+,er2
        .endm
        .macro SAVEEXR
        .endm
        .macro RESTOREEXR
        .endm
#endif
#if defined(CONFIG_CPU_H8S)
#define USERRET 10
#define USEREXR 8
INTERRUPTS = 128
        .h8300s
        .macro SHLL2 reg
        shll.l  #2,\reg
        .endm
        .macro SHLR2 reg
        shlr.l  #2,\reg
        .endm
        .macro SAVEREGS
        stm.l   er0-er3,@-sp
        .endm
        .macro RESTOREREGS
        ldm.l   @sp+,er2-er3
        .endm
        .macro SAVEEXR
        mov.w   @(USEREXR:16,er0),r1
        mov.w   r1,@(LEXR-LER3:16,sp)           /* copy EXR */
        .endm
        .macro RESTOREEXR
        mov.w   @(LEXR-LER1:16,sp),r1           /* restore EXR */
        mov.b   r1l,r1h
        mov.w   r1,@(USEREXR:16,er0)
        .endm
#endif
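
/*
 * SAVE_ALL builds the register frame used by the rest of the kernel
 * (the L* constants come from <asm/asm-offsets.h> and describe that
 * frame's layout).  On entry from user mode the user stack pointer is
 * stashed in _sw_usp, the kernel stack is loaded from _sw_ksp, and the
 * CPU-pushed return address (plus EXR on the H8S) is copied into the
 * frame; on entry from kernel mode the frame is simply completed on
 * the current stack.  The macro leaves the entry-time CCR in r1.
 * RESTORE_ALL is the mirror image and finishes with an rte.
 */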
/* CPU context save/restore macros. */

        .macro SAVE_ALL
        mov.l   er0,@-sp
        stc     ccr,r0l                         /* check kernel mode */
        btst    #4,r0l
        bne     5f

        /* user mode */
        mov.l   sp,@_sw_usp
        mov.l   @sp,er0                         /* restore saved er0 */
        orc     #0x10,ccr                       /* switch kernel stack */
        mov.l   @_sw_ksp,sp
        sub.l   #(LRET-LORIG),sp                /* allocate LORIG - LRET */
        SAVEREGS
        mov.l   @_sw_usp,er0
        mov.l   @(USERRET:16,er0),er1           /* copy the RET addr */
        mov.l   er1,@(LRET-LER3:16,sp)
        SAVEEXR

        mov.l   @(LORIG-LER3:16,sp),er0
        mov.l   er0,@(LER0-LER3:16,sp)          /* copy ER0 */
        mov.w   e1,r1                           /* e1 highbyte = ccr */
        and     #0xef,r1h                       /* clear the kernel mode flag */
        bra     6f
5:
        /* kernel mode */
        mov.l   @sp,er0                         /* restore saved er0 */
        subs    #2,sp                           /* set dummy ccr */
        subs    #4,sp                           /* set dummy sp */
        SAVEREGS
        mov.w   @(LRET-LER3:16,sp),r1           /* copy old ccr */
6:
        mov.b   r1h,r1l
        mov.b   #0,r1h
        mov.w   r1,@(LCCR-LER3:16,sp)           /* set ccr */
        mov.l   @_sw_usp,er2
        mov.l   er2,@(LSP-LER3:16,sp)           /* set usp */
        mov.l   er6,@-sp                        /* syscall arg #6 */
        mov.l   er5,@-sp                        /* syscall arg #5 */
        mov.l   er4,@-sp                        /* syscall arg #4 */
        .endm                                   /* r1 = ccr */
        .macro RESTORE_ALL
        mov.l   @sp+,er4
        mov.l   @sp+,er5
        mov.l   @sp+,er6
        RESTOREREGS
        mov.w   @(LCCR-LER1:16,sp),r0           /* check kernel mode */
        btst    #4,r0l
        bne     7f

        orc     #0xc0,ccr
        mov.l   @(LSP-LER1:16,sp),er0
        mov.l   @(LER0-LER1:16,sp),er1          /* restore ER0 */
        mov.l   er1,@er0
        RESTOREEXR
        mov.w   @(LCCR-LER1:16,sp),r1           /* restore the RET addr */
        mov.b   r1l,r1h
        mov.b   @(LRET+1-LER1:16,sp),r1l
        mov.w   r1,e1
        mov.w   @(LRET+2-LER1:16,sp),r1
        mov.l   er1,@(USERRET:16,er0)
        mov.l   @sp+,er1
        add.l   #(LRET-LER1),sp                 /* remove LORIG - LRET */
        mov.l   sp,@_sw_ksp
        andc    #0xef,ccr                       /* switch to user mode */
        mov.l   er0,sp
        bra     8f
7:
        mov.l   @sp+,er1
        add.l   #10,sp
8:
        mov.l   @sp+,er0
        adds    #4,sp                           /* remove the sw created LVEC */
        rte
        .endm
        .globl  _system_call
        .globl  ret_from_exception
        .globl  ret_from_fork
        .globl  ret_from_kernel_thread
        .globl  ret_from_interrupt
        .globl  _interrupt_redirect_table
        .globl  _sw_ksp,_sw_usp
        .globl  _resume
        .globl  _interrupt_entry
        .globl  _trace_break
        .globl  _nmi
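
/*
 * Interrupt redirection table.  For a ROM kernel each hardware vector
 * points at one 4-byte slot here: most slots are a "jsr
 * @_interrupt_entry" whose pushed return address identifies the
 * vector, while the trap vectors (system call, kgdb, breakpoint) jump
 * straight to their handlers.  For a RAM kernel only a pointer is kept
 * here, holding the address of the table presumably set up by the boot
 * environment.
 */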
#if defined(CONFIG_ROMKERNEL)
        .section .int_redirect,"ax"
_interrupt_redirect_table:
#if defined(CONFIG_CPU_H8300H)
        .rept   7
        .long   0
        .endr
#endif
#if defined(CONFIG_CPU_H8S)
        .rept   5
        .long   0
        .endr
        jmp     @_trace_break
        .long   0
#endif
        jsr     @_interrupt_entry               /* NMI */
        jmp     @_system_call                   /* TRAPA #0 (System call) */
        .long   0
#if defined(CONFIG_KGDB)
        jmp     @_kgdb_trap
#else
        .long   0
#endif
        jmp     @_trace_break                   /* TRAPA #3 (breakpoint) */
        .rept   INTERRUPTS-12
        jsr     @_interrupt_entry
        .endr
#endif
#if defined(CONFIG_RAMKERNEL)
        .globl  _interrupt_redirect_table
        .section .bss
_interrupt_redirect_table:
        .space  4
#endif
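
/*
 * Common interrupt entry.  Every interrupt vector in the redirect
 * table is a "jsr @_interrupt_entry", so the return address pushed by
 * that jsr (the LVEC slot, read from the user stack if the interrupt
 * came from user mode) identifies the vector: subtracting the table
 * base, dividing by the 4-byte entry size and subtracting one for the
 * jsr adjustment yields the vector number, which is passed to do_IRQ()
 * in er0 along with a pointer to the saved registers in er1.
 */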
        .section .text
        .align  2
_interrupt_entry:
        SAVE_ALL
/* r1l is saved ccr */
        mov.l   sp,er0
        add.l   #LVEC,er0
        btst    #4,r1l
        bne     1f
        /* user LVEC */
        mov.l   @_sw_usp,er0
        adds    #4,er0
1:
        mov.l   @er0,er0                        /* LVEC address */
#if defined(CONFIG_ROMKERNEL)
        sub.l   #_interrupt_redirect_table,er0
#endif
#if defined(CONFIG_RAMKERNEL)
        mov.l   @_interrupt_redirect_table,er1
        sub.l   er1,er0
#endif
        SHLR2   er0
        dec.l   #1,er0
        mov.l   sp,er1
        subs    #4,er1                          /* adjust ret_pc */
#if defined(CONFIG_CPU_H8S)
        orc     #7,exr
#endif
        jsr     @do_IRQ
        jmp     @ret_from_interrupt
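
/*
 * System call entry (TRAPA #0).  The syscall number arrives in er0;
 * it is range-checked against __NR_syscalls and used to index
 * _sys_call_table, with the first three arguments reloaded from the
 * saved er1-er3 slots (er4-er6 were pushed by SAVE_ALL as arguments
 * #4-#6).  The return value is written back into the saved er0 slot,
 * and out-of-range numbers yield -ENOSYS.  When any bit in
 * _TIF_WORK_SYSCALL_MASK is set, do_syscall_trace_enter/leave are
 * called around the syscall.
 */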
_system_call:
        subs    #4,sp                           /* dummy LVEC */
        SAVE_ALL
        /* er0: syscall nr */
        andc    #0xbf,ccr
        mov.l   er0,er4

        /* save top of frame */
        mov.l   sp,er0
        jsr     @set_esp0
        andc    #0x3f,ccr
        mov.l   sp,er2
        and.w   #0xe000,r2
        mov.l   @(TI_FLAGS:16,er2),er2
        and.w   #_TIF_WORK_SYSCALL_MASK,r2
        beq     1f
        mov.l   sp,er0
        jsr     @do_syscall_trace_enter
1:
        cmp.l   #__NR_syscalls,er4
        bcc     badsys
        SHLL2   er4
        mov.l   #_sys_call_table,er0
        add.l   er4,er0
        mov.l   @er0,er4
        beq     ret_from_exception:16
        mov.l   @(LER1:16,sp),er0
        mov.l   @(LER2:16,sp),er1
        mov.l   @(LER3:16,sp),er2
        jsr     @er4
        mov.l   er0,@(LER0:16,sp)               /* save the return value */
        mov.l   sp,er2
        and.w   #0xe000,r2
        mov.l   @(TI_FLAGS:16,er2),er2
        and.w   #_TIF_WORK_SYSCALL_MASK,r2
        beq     2f
        mov.l   sp,er0
        jsr     @do_syscall_trace_leave
2:
        orc     #0xc0,ccr
        bra     resume_userspace

badsys:
        mov.l   #-ENOSYS,er0
        mov.l   er0,@(LER0:16,sp)
        bra     resume_userspace
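
/*
 * Common return path.  A return to kernel mode goes through
 * resume_kernel (an alias for restore_all unless CONFIG_PREEMPT is
 * enabled); a return to user space loops in resume_userspace until no
 * _TIF_WORK_MASK flag is left, calling schedule() or
 * do_notify_resume() as required.  thread_info is recovered by masking
 * the stack pointer with 0xe000, i.e. rounding down to the base of the
 * (8 KiB aligned) kernel stack where thread_info lives.
 */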
#if !defined(CONFIG_PREEMPT)
#define resume_kernel restore_all
#endif

ret_from_exception:
#if defined(CONFIG_PREEMPT)
        orc     #0xc0,ccr
#endif
ret_from_interrupt:
        mov.b   @(LCCR+1:16,sp),r0l
        btst    #4,r0l
        bne     resume_kernel:16                /* return from kernel */
resume_userspace:
        andc    #0xbf,ccr
        mov.l   sp,er4
        and.w   #0xe000,r4                      /* er4 <- current thread info */
        mov.l   @(TI_FLAGS:16,er4),er1
        and.l   #_TIF_WORK_MASK,er1
        beq     restore_all:8
work_pending:
        btst    #TIF_NEED_RESCHED,r1l
        bne     work_resched:8
        /* work notifysig */
        mov.l   sp,er0
        subs    #4,er0                          /* er0: pt_regs */
        jsr     @do_notify_resume
        bra     resume_userspace:8
work_resched:
        mov.l   sp,er0
        jsr     @set_esp0
        jsr     @schedule
        bra     resume_userspace:8
restore_all:
        RESTORE_ALL                             /* Does RTE */
#if defined(CONFIG_PREEMPT)
resume_kernel:
        mov.l   @(TI_PRE_COUNT:16,er4),er0
        bne     restore_all:8
need_resched:
        mov.l   @(TI_FLAGS:16,er4),er0
        btst    #TIF_NEED_RESCHED,r0l
        beq     restore_all:8
        mov.b   @(LCCR+1:16,sp),r0l             /* Interrupt Enabled? */
        bmi     restore_all:8
        mov.l   sp,er0
        jsr     @set_esp0
        jsr     @preempt_schedule_irq
        bra     need_resched:8
#endif
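
/*
 * A new task runs here the first time it is scheduled: er2 still holds
 * the previous task (as arranged by switch_to()), which
 * schedule_tail() expects as its argument.  A kernel thread
 * additionally loads its argument and thread function from the saved
 * er4/er5 slots and calls the function before dropping into the
 * normal exception return path.
 */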
ret_from_fork:
        mov.l   er2,er0
        jsr     @schedule_tail
        jmp     @ret_from_exception

ret_from_kernel_thread:
        mov.l   er2,er0
        jsr     @schedule_tail
        mov.l   @(LER4:16,sp),er0
        mov.l   @(LER5:16,sp),er1
        jsr     @er1
        jmp     @ret_from_exception
_resume:
        /*
         * Beware - when entering resume, prev (the current task's
         * thread_struct) is in er0 and next (the new task's
         * thread_struct) is in er1, so don't change these registers
         * until their contents are no longer needed.
         */

        /* save sr */
        sub.w   r3,r3
        stc     ccr,r3l
        mov.w   r3,@(THREAD_CCR+2:16,er0)

        /* disable interrupts */
        orc     #0xc0,ccr
        mov.l   @_sw_usp,er3
        mov.l   er3,@(THREAD_USP:16,er0)
        mov.l   sp,@(THREAD_KSP:16,er0)

        /* Skip address space switching if they are the same. */
        /* FIXME: what did we hack out of here, this does nothing! */

        mov.l   @(THREAD_USP:16,er1),er0
        mov.l   er0,@_sw_usp
        mov.l   @(THREAD_KSP:16,er1),sp

        /* restore status register */
        mov.w   @(THREAD_CCR+2:16,er1),r3

        ldc     r3l,ccr
        rts
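
/*
 * Breakpoint/trace trap, reached via the TRAPA #3 vector (and, on the
 * H8S, the trace vector).  The LORIG slot of the frame is set to -1;
 * the address recorded at the top of the saved user stack is wound
 * back two bytes unless the word just before it is 0x5730 (apparently
 * the trapa #3 opcode), and the resulting address is passed to
 * trace_trap().
 */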
_trace_break:
        subs    #4,sp
        SAVE_ALL
        sub.l   er1,er1
        dec.l   #1,er1
        mov.l   er1,@(LORIG,sp)
        mov.l   sp,er0
        jsr     @set_esp0
        mov.l   @_sw_usp,er0
        mov.l   @er0,er1
        mov.w   @(-2:16,er1),r2
        cmp.w   #0x5730,r2
        beq     1f
        subs    #2,er1
        mov.l   er1,@er0
1:
        and.w   #0xff,e1
        mov.l   er1,er0
        jsr     @trace_trap
        jmp     @ret_from_exception
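
/*
 * NMI entry: reserve a LVEC slot and plant in it an address eight
 * 4-byte entries past the redirect table, so that the shared
 * _interrupt_entry code resolves it to the NMI vector, then fall
 * through to the common interrupt path.
 */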
_nmi:
        subs    #4,sp
        mov.l   er0,@-sp
        mov.l   @_interrupt_redirect_table,er0
        add.l   #8*4,er0
        mov.l   er0,@(4,sp)
        mov.l   @sp+,er0
        jmp     @_interrupt_entry
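
/*
 * KGDB trap entry (CONFIG_KGDB).  A full frame is built, its saved-SP
 * slot is pointed at the frame's return-address area, the frame is
 * recorded via set_esp0(), and h8300_kgdb_trap() is called with a
 * pointer to the saved registers before returning through
 * ret_from_exception.
 */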
#if defined(CONFIG_KGDB)
_kgdb_trap:
        subs    #4,sp
        SAVE_ALL
        mov.l   sp,er0
        add.l   #LRET,er0
        mov.l   er0,@(LSP,sp)
        jsr     @set_esp0
        mov.l   sp,er0
        subs    #4,er0
        jsr     @h8300_kgdb_trap
        jmp     @ret_from_exception
#endif
        .section .bss
_sw_ksp:
        .space  4
_sw_usp:
        .space  4

        .end