/* entry-nommu.S */
/*
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <linux/errno.h>
#include <asm/entry.h>
#include <asm/asm-offsets.h>
#include <asm/registers.h>
#include <asm/unistd.h>
#include <asm/percpu.h>
#include <asm/signal.h>

/*
 * Interrupt-enable / break-in-progress helper macros.
 *
 * When the CPU is configured with the optional MSR bit set/clear
 * instructions, msrclr/msrset touch MSR atomically (r0 destination
 * discards the old value).  Otherwise fall back to a read-modify-write
 * of rmsr through r11.
 * NOTE(review): the fallback variants clobber r11 — callers must not
 * hold a live value in r11 across these macros.
 */
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	.macro	disable_irq
	msrclr	r0, MSR_IE		/* clear Interrupt Enable */
	.endm

	.macro	enable_irq
	msrset	r0, MSR_IE		/* set Interrupt Enable */
	.endm

	.macro	clear_bip
	msrclr	r0, MSR_BIP		/* clear Break In Progress */
	.endm
#else
	.macro	disable_irq
	mfs	r11, rmsr
	andi	r11, r11, ~MSR_IE
	mts	rmsr, r11
	.endm

	.macro	enable_irq
	mfs	r11, rmsr
	ori	r11, r11, MSR_IE
	mts	rmsr, r11
	.endm

	.macro	clear_bip
	mfs	r11, rmsr
	andi	r11, r11, ~MSR_BIP
	mts	rmsr, r11
	.endm
#endif
/*
 * _interrupt
 * Hardware interrupt entry point (reached via the IVT at the bottom of
 * this file).  Builds a struct pt_regs frame on the kernel stack, calls
 * do_IRQ(regs), and — if user mode was interrupted — handles
 * rescheduling and signal delivery before restoring the full context.
 * Per-cpu KM flag: 0 = was in user mode, non-zero = kernel mode.
 */
ENTRY(_interrupt)
	swi	r1, r0, PER_CPU(ENTRY_SP)	/* save the current sp */
	swi	r11, r0, PER_CPU(R11_SAVE)	/* temporarily save r11 */
	lwi	r11, r0, PER_CPU(KM)		/* load mode indicator */
	beqid	r11, 1f				/* from user mode? switch stacks */
	nop
	brid	2f				/* already on kernel stack: jump over */
	addik	r1, r1, (-PT_SIZE)		/* room for pt_regs (delay slot) */
1:	/* switch to kernel stack */
	lwi	r1, r0, PER_CPU(CURRENT_SAVE)	/* get the saved current */
	lwi	r1, r1, TS_THREAD_INFO		/* get the thread info */
	/* calculate kernel stack pointer (pt_regs at stack top) */
	addik	r1, r1, THREAD_SIZE - PT_SIZE
2:
	swi	r11, r1, PT_MODE		/* store the pre-interrupt mode */
	lwi	r11, r0, PER_CPU(R11_SAVE)	/* reload r11 */
	swi	r2, r1, PT_R2
	swi	r3, r1, PT_R3
	swi	r4, r1, PT_R4
	swi	r5, r1, PT_R5
	swi	r6, r1, PT_R6
	swi	r7, r1, PT_R7
	swi	r8, r1, PT_R8
	swi	r9, r1, PT_R9
	swi	r10, r1, PT_R10
	swi	r11, r1, PT_R11
	swi	r12, r1, PT_R12
	swi	r13, r1, PT_R13
	swi	r14, r1, PT_R14
	swi	r14, r1, PT_PC		/* r14 holds the interrupted PC */
	swi	r15, r1, PT_R15
	swi	r16, r1, PT_R16
	swi	r17, r1, PT_R17
	swi	r18, r1, PT_R18
	swi	r19, r1, PT_R19
	swi	r20, r1, PT_R20
	swi	r21, r1, PT_R21
	swi	r22, r1, PT_R22
	swi	r23, r1, PT_R23
	swi	r24, r1, PT_R24
	swi	r25, r1, PT_R25
	swi	r26, r1, PT_R26
	swi	r27, r1, PT_R27
	swi	r28, r1, PT_R28
	swi	r29, r1, PT_R29
	swi	r30, r1, PT_R30
	swi	r31, r1, PT_R31
	/* special purpose registers */
	mfs	r11, rmsr
	swi	r11, r1, PT_MSR
	mfs	r11, rear
	swi	r11, r1, PT_EAR
	mfs	r11, resr
	swi	r11, r1, PT_ESR
	mfs	r11, rfsr
	swi	r11, r1, PT_FSR
	/* reload original stack pointer and save it */
	lwi	r11, r0, PER_CPU(ENTRY_SP)
	swi	r11, r1, PT_R1
	/* update mode indicator: we are in kernel mode now */
	addik	r11, r0, 1
	swi	r11, r0, PER_CPU(KM)
	/* restore r31 (current task pointer) */
	lwi	r31, r0, PER_CPU(CURRENT_SAVE)
	/*
	 * Call do_IRQ.  Callees return with "rtsd r15, 8" (see _switch_to
	 * below), so point the link register 8 bytes before ret_from_intr.
	 */
	addik	r15, r0, ret_from_intr - 8
	addk	r6, r0, r15
	braid	do_IRQ
	add	r5, r0, r1		/* arg 1: pt_regs (delay slot) */
ret_from_intr:
	lwi	r11, r1, PT_MODE
	bneid	r11, no_intr_resched	/* returning to kernel: no extra work */
	lwi	r6, r31, TS_THREAD_INFO	/* get thread info (delay slot) */
	lwi	r19, r6, TI_FLAGS	/* get flags in thread info */
	/* do an extra work if any bits are set */
	andi	r11, r19, _TIF_NEED_RESCHED
	beqi	r11, 1f
	bralid	r15, schedule
	nop
1:	andi	r11, r19, _TIF_SIGPENDING
	beqid	r11, no_intr_resched
	addk	r5, r1, r0		/* do_signal arg 1: regs (delay slot) */
	addk	r7, r0, r0		/* arg 3 = 0 (cf. work_pending, which passes 1) */
	bralid	r15, do_signal
	addk	r6, r0, r0		/* arg 2 = 0 (delay slot) */
no_intr_resched:
	/* Disable interrupts, we are now committed to the state restore */
	disable_irq
	/* save mode indicator */
	lwi	r11, r1, PT_MODE
	swi	r11, r0, PER_CPU(KM)
	/* save r31 */
	swi	r31, r0, PER_CPU(CURRENT_SAVE)
restore_context:
	/* special purpose registers */
	lwi	r11, r1, PT_FSR
	mts	rfsr, r11
	lwi	r11, r1, PT_ESR
	mts	resr, r11
	lwi	r11, r1, PT_EAR
	mts	rear, r11
	lwi	r11, r1, PT_MSR
	mts	rmsr, r11
	lwi	r31, r1, PT_R31
	lwi	r30, r1, PT_R30
	lwi	r29, r1, PT_R29
	lwi	r28, r1, PT_R28
	lwi	r27, r1, PT_R27
	lwi	r26, r1, PT_R26
	lwi	r25, r1, PT_R25
	lwi	r24, r1, PT_R24
	lwi	r23, r1, PT_R23
	lwi	r22, r1, PT_R22
	lwi	r21, r1, PT_R21
	lwi	r20, r1, PT_R20
	lwi	r19, r1, PT_R19
	lwi	r18, r1, PT_R18
	lwi	r17, r1, PT_R17
	lwi	r16, r1, PT_R16
	lwi	r15, r1, PT_R15
	lwi	r14, r1, PT_PC		/* return target for rtid below */
	lwi	r13, r1, PT_R13
	lwi	r12, r1, PT_R12
	lwi	r11, r1, PT_R11
	lwi	r10, r1, PT_R10
	lwi	r9, r1, PT_R9
	lwi	r8, r1, PT_R8
	lwi	r7, r1, PT_R7
	lwi	r6, r1, PT_R6
	lwi	r5, r1, PT_R5
	lwi	r4, r1, PT_R4
	lwi	r3, r1, PT_R3
	lwi	r2, r1, PT_R2
	lwi	r1, r1, PT_R1		/* restore sp last */
	rtid	r14, 0			/* return from interrupt (re-enables IE) */
	nop
/* _reset: jump to absolute address 0 (the reset vector). */
ENTRY(_reset)
	brai	0;
/*
 * _user_exception
 * System call entry point.  Builds a pt_regs frame on the kernel
 * stack, validates the syscall number in r12, dispatches through
 * sys_call_table, and returns to user space via ret_to_user.
 */
ENTRY(_user_exception)
	swi	r1, r0, PER_CPU(ENTRY_SP)	/* save the current sp */
	swi	r11, r0, PER_CPU(R11_SAVE)	/* temporarily save r11 */
	lwi	r11, r0, PER_CPU(KM)		/* load mode indicator */
	beqid	r11, 1f				/* Already in kernel mode? */
	nop
	brid	2f				/* jump over */
	addik	r1, r1, (-PT_SIZE)		/* Room for pt_regs (delay slot) */
1:	/* Switch to kernel stack */
	lwi	r1, r0, PER_CPU(CURRENT_SAVE)	/* get the saved current */
	lwi	r1, r1, TS_THREAD_INFO		/* get the thread info */
	/* calculate kernel stack pointer */
	addik	r1, r1, THREAD_SIZE - PT_SIZE
2:
	swi	r11, r1, PT_MODE	/* store the mode */
	lwi	r11, r0, PER_CPU(R11_SAVE)	/* reload r11 */
	/* save them on stack */
	swi	r2, r1, PT_R2
	swi	r3, r1, PT_R3	/* r3: _always_ in clobber list; see unistd.h */
	swi	r4, r1, PT_R4	/* r4: _always_ in clobber list; see unistd.h */
	swi	r5, r1, PT_R5
	swi	r6, r1, PT_R6
	swi	r7, r1, PT_R7
	swi	r8, r1, PT_R8
	swi	r9, r1, PT_R9
	swi	r10, r1, PT_R10
	swi	r11, r1, PT_R11
	/* r12: _always_ in clobber list; see unistd.h */
	swi	r12, r1, PT_R12
	swi	r13, r1, PT_R13
	/* r14: _always_ in clobber list; see unistd.h */
	swi	r14, r1, PT_R14
	/* but we want to return to the next inst. */
	addik	r14, r14, 0x4
	swi	r14, r1, PT_PC	/* increment by 4 and store in pc */
	swi	r15, r1, PT_R15
	swi	r16, r1, PT_R16
	swi	r17, r1, PT_R17
	swi	r18, r1, PT_R18
	swi	r19, r1, PT_R19
	swi	r20, r1, PT_R20
	swi	r21, r1, PT_R21
	swi	r22, r1, PT_R22
	swi	r23, r1, PT_R23
	swi	r24, r1, PT_R24
	swi	r25, r1, PT_R25
	swi	r26, r1, PT_R26
	swi	r27, r1, PT_R27
	swi	r28, r1, PT_R28
	swi	r29, r1, PT_R29
	swi	r30, r1, PT_R30
	swi	r31, r1, PT_R31
	disable_irq
	nop		/* make sure the IE clear is in effect */
	clear_bip	/* once IE is in effect it is safe to clear BIP */
	nop
	/* special purpose registers */
	mfs	r11, rmsr
	swi	r11, r1, PT_MSR
	mfs	r11, rear
	swi	r11, r1, PT_EAR
	mfs	r11, resr
	swi	r11, r1, PT_ESR
	mfs	r11, rfsr
	swi	r11, r1, PT_FSR
	/* reload original stack pointer and save it */
	lwi	r11, r0, PER_CPU(ENTRY_SP)
	swi	r11, r1, PT_R1
	/* update mode indicator: we are in kernel mode */
	addik	r11, r0, 1
	swi	r11, r0, PER_CPU(KM)
	/* restore r31 (current task pointer) */
	lwi	r31, r0, PER_CPU(CURRENT_SAVE)
	/* re-enable interrupts now we are in kernel mode */
	enable_irq
	/* See if the system call number (r12) is valid. */
	addi	r11, r12, -__NR_syscalls
	bgei	r11, 1f		/* r12 >= __NR_syscalls: return -ENOSYS */
	/* Figure out which function to use for this system call. */
	/* Note Microblaze barrel shift is optional, so don't rely on it */
	add	r12, r12, r12	/* convert num -> byte offset (x4, word table) */
	add	r12, r12, r12
	lwi	r12, r12, sys_call_table	/* Get function pointer */
	/* handlers return with "rtsd r15, 8", landing on ret_to_user */
	addik	r15, r0, ret_to_user-8	/* set return address */
	bra	r12		/* Make the system call. */
	bri	0		/* won't reach here */
1:
	brid	ret_to_user	/* jump to syscall epilogue */
	addi	r3, r0, -ENOSYS	/* set errno in delay slot */
/*
 * Debug traps are like a system call, but entered via brki r14, 0x60
 * All we need to do is send the SIGTRAP signal to current, ptrace and
 * do_signal will handle the rest.
 * NOTE(review): unlike _interrupt/_user_exception this path always
 * switches to the kernel stack of 'current' — confirm a debug trap can
 * never arrive while already running on that kernel stack.
 */
ENTRY(_debug_exception)
	swi	r1, r0, PER_CPU(ENTRY_SP)	/* save the current sp */
	lwi	r1, r0, PER_CPU(CURRENT_SAVE)	/* get the saved current */
	lwi	r1, r1, TS_THREAD_INFO	/* get the thread info */
	addik	r1, r1, THREAD_SIZE - PT_SIZE	/* get the kernel stack */
	swi	r11, r0, PER_CPU(R11_SAVE)	/* temporarily save r11 */
	lwi	r11, r0, PER_CPU(KM)	/* load mode indicator */
/* save_context: */
	swi	r11, r1, PT_MODE	/* store the mode */
	lwi	r11, r0, PER_CPU(R11_SAVE)	/* reload r11 */
	/* save them on stack */
	swi	r2, r1, PT_R2
	swi	r3, r1, PT_R3	/* r3: _always_ in clobber list; see unistd.h */
	swi	r4, r1, PT_R4	/* r4: _always_ in clobber list; see unistd.h */
	swi	r5, r1, PT_R5
	swi	r6, r1, PT_R6
	swi	r7, r1, PT_R7
	swi	r8, r1, PT_R8
	swi	r9, r1, PT_R9
	swi	r10, r1, PT_R10
	swi	r11, r1, PT_R11
	/* r12: _always_ in clobber list; see unistd.h */
	swi	r12, r1, PT_R12
	swi	r13, r1, PT_R13
	/* r14: _always_ in clobber list; see unistd.h */
	swi	r14, r1, PT_R14
	swi	r14, r1, PT_PC	/* Will return to interrupted instruction */
	swi	r15, r1, PT_R15
	swi	r16, r1, PT_R16
	swi	r17, r1, PT_R17
	swi	r18, r1, PT_R18
	swi	r19, r1, PT_R19
	swi	r20, r1, PT_R20
	swi	r21, r1, PT_R21
	swi	r22, r1, PT_R22
	swi	r23, r1, PT_R23
	swi	r24, r1, PT_R24
	swi	r25, r1, PT_R25
	swi	r26, r1, PT_R26
	swi	r27, r1, PT_R27
	swi	r28, r1, PT_R28
	swi	r29, r1, PT_R29
	swi	r30, r1, PT_R30
	swi	r31, r1, PT_R31
	disable_irq
	nop		/* make sure the IE clear is in effect */
	clear_bip	/* once IE is in effect it is safe to clear BIP */
	nop
	/* special purpose registers */
	mfs	r11, rmsr
	swi	r11, r1, PT_MSR
	mfs	r11, rear
	swi	r11, r1, PT_EAR
	mfs	r11, resr
	swi	r11, r1, PT_ESR
	mfs	r11, rfsr
	swi	r11, r1, PT_FSR
	/* reload original stack pointer and save it */
	lwi	r11, r0, PER_CPU(ENTRY_SP)
	swi	r11, r1, PT_R1
	/* update mode indicator: we are in kernel mode */
	addik	r11, r0, 1
	swi	r11, r0, PER_CPU(KM)
	/* restore r31 (current task pointer) */
	lwi	r31, r0, PER_CPU(CURRENT_SAVE)
	/* re-enable interrupts now we are in kernel mode */
	enable_irq
	addi	r5, r0, SIGTRAP		/* sending the trap signal */
	add	r6, r0, r31		/* to current */
	bralid	r15, send_sig
	add	r7, r0, r0		/* 3rd param zero (delay slot) */
	/* Restore r3/r4 to work around how ret_to_user works */
	lwi	r3, r1, PT_R3
	lwi	r4, r1, PT_R4
	bri	ret_to_user
/* _break: hardware break vector — bri 0 is a branch-to-self spin. */
ENTRY(_break)
	bri	0
/*
 * struct task_struct *_switch_to(struct thread_info *prev,
 *				   struct thread_info *next);
 * Context switch: saves prev's (r5) non-volatile/dedicated registers
 * and SPRs into its cpu_context, installs next's (r6) task as current
 * (r31 and per-cpu CURRENT_SAVE), restores next's context, and returns
 * the old current task pointer in r3.
 */
ENTRY(_switch_to)
	/* prepare return value: old current (r31) */
	addk	r3, r0, r31
	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp register */
	addik	r11, r5, TI_CPU_CONTEXT
	swi	r1, r11, CC_R1
	swi	r2, r11, CC_R2
	/* skip volatile registers.
	 * they are saved on stack when we jumped to _switch_to() */
	/* dedicated registers */
	swi	r13, r11, CC_R13
	swi	r14, r11, CC_R14
	swi	r15, r11, CC_R15
	swi	r16, r11, CC_R16
	swi	r17, r11, CC_R17
	swi	r18, r11, CC_R18
	/* save non-volatile registers */
	swi	r19, r11, CC_R19
	swi	r20, r11, CC_R20
	swi	r21, r11, CC_R21
	swi	r22, r11, CC_R22
	swi	r23, r11, CC_R23
	swi	r24, r11, CC_R24
	swi	r25, r11, CC_R25
	swi	r26, r11, CC_R26
	swi	r27, r11, CC_R27
	swi	r28, r11, CC_R28
	swi	r29, r11, CC_R29
	swi	r30, r11, CC_R30
	/* special purpose registers */
	mfs	r12, rmsr
	swi	r12, r11, CC_MSR
	mfs	r12, rear
	swi	r12, r11, CC_EAR
	mfs	r12, resr
	swi	r12, r11, CC_ESR
	mfs	r12, rfsr
	swi	r12, r11, CC_FSR
	/* update r31, the current */
	lwi	r31, r6, TI_TASK
	swi	r31, r0, PER_CPU(CURRENT_SAVE)
	/* get new process' cpu context and restore */
	addik	r11, r6, TI_CPU_CONTEXT
	/* special purpose registers */
	lwi	r12, r11, CC_FSR
	mts	rfsr, r12
	lwi	r12, r11, CC_ESR
	mts	resr, r12
	lwi	r12, r11, CC_EAR
	mts	rear, r12
	lwi	r12, r11, CC_MSR
	mts	rmsr, r12
	/* non-volatile registers */
	lwi	r30, r11, CC_R30
	lwi	r29, r11, CC_R29
	lwi	r28, r11, CC_R28
	lwi	r27, r11, CC_R27
	lwi	r26, r11, CC_R26
	lwi	r25, r11, CC_R25
	lwi	r24, r11, CC_R24
	lwi	r23, r11, CC_R23
	lwi	r22, r11, CC_R22
	lwi	r21, r11, CC_R21
	lwi	r20, r11, CC_R20
	lwi	r19, r11, CC_R19
	/* dedicated registers */
	lwi	r18, r11, CC_R18
	lwi	r17, r11, CC_R17
	lwi	r16, r11, CC_R16
	lwi	r15, r11, CC_R15
	lwi	r14, r11, CC_R14
	lwi	r13, r11, CC_R13
	/* skip volatile registers */
	lwi	r2, r11, CC_R2
	lwi	r1, r11, CC_R1
	rtsd	r15, 8		/* return on the new task's stack */
	nop
/*
 * ret_from_fork
 * First code run by a newly forked child: calls schedule_tail(prev)
 * (prev arrives in r3 from _switch_to), zeroes the syscall return
 * value, and drops into the user-return path.
 */
ENTRY(ret_from_fork)
	addk	r5, r0, r3	/* arg 1: previous task (from _switch_to's r3) */
	addk	r6, r0, r1	/* arg 2: current stack/regs pointer */
	brlid	r15, schedule_tail
	nop
	swi	r31, r1, PT_R31	/* save r31 in user context. */
			/* will soon be restored to r31 in ret_to_user */
	addk	r3, r0, r0	/* child's syscall return value = 0 */
	brid	ret_to_user
	nop
/*
 * work_pending: slow path of ret_to_user.  r19 holds the thread-info
 * flags, r1 the pt_regs frame.  Reschedules and/or delivers signals,
 * then rejoins the fast path at no_work_pending.
 */
work_pending:
	enable_irq
	andi	r11, r19, _TIF_NEED_RESCHED
	beqi	r11, 1f
	bralid	r15, schedule
	nop
1:	andi	r11, r19, _TIF_SIGPENDING
	beqi	r11, no_work_pending
	addk	r5, r1, r0	/* do_signal arg 1: regs */
	addik	r7, r0, 1	/* arg 3 = 1 (cf. interrupt path, which passes 0) */
	bralid	r15, do_signal
	addk	r6, r0, r0	/* arg 2 = 0 (delay slot) */
	bri	no_work_pending

/*
 * ret_to_user: common epilogue for syscalls and debug traps.  Stores
 * the return value (r3/r4) into the frame, handles pending work, then
 * restores the whole context and returns with rtid.
 */
ENTRY(ret_to_user)
	disable_irq
	swi	r4, r1, PT_R4	/* return val */
	swi	r3, r1, PT_R3	/* return val */
	lwi	r6, r31, TS_THREAD_INFO	/* get thread info */
	lwi	r19, r6, TI_FLAGS	/* get flags in thread info */
	bnei	r19, work_pending	/* do an extra work if any bits are set */
no_work_pending:
	disable_irq	/* work_pending re-enabled IRQs; mask again */
	/* save r31 */
	swi	r31, r0, PER_CPU(CURRENT_SAVE)
	/* save mode indicator */
	lwi	r18, r1, PT_MODE
	swi	r18, r0, PER_CPU(KM)
/* restore_context: */
	/* special purpose registers */
	lwi	r18, r1, PT_FSR
	mts	rfsr, r18
	lwi	r18, r1, PT_ESR
	mts	resr, r18
	lwi	r18, r1, PT_EAR
	mts	rear, r18
	lwi	r18, r1, PT_MSR
	mts	rmsr, r18
	lwi	r31, r1, PT_R31
	lwi	r30, r1, PT_R30
	lwi	r29, r1, PT_R29
	lwi	r28, r1, PT_R28
	lwi	r27, r1, PT_R27
	lwi	r26, r1, PT_R26
	lwi	r25, r1, PT_R25
	lwi	r24, r1, PT_R24
	lwi	r23, r1, PT_R23
	lwi	r22, r1, PT_R22
	lwi	r21, r1, PT_R21
	lwi	r20, r1, PT_R20
	lwi	r19, r1, PT_R19
	lwi	r18, r1, PT_R18
	lwi	r17, r1, PT_R17
	lwi	r16, r1, PT_R16
	lwi	r15, r1, PT_R15
	lwi	r14, r1, PT_PC	/* return target for rtid below */
	lwi	r13, r1, PT_R13
	lwi	r12, r1, PT_R12
	lwi	r11, r1, PT_R11
	lwi	r10, r1, PT_R10
	lwi	r9, r1, PT_R9
	lwi	r8, r1, PT_R8
	lwi	r7, r1, PT_R7
	lwi	r6, r1, PT_R6
	lwi	r5, r1, PT_R5
	lwi	r4, r1, PT_R4	/* return val */
	lwi	r3, r1, PT_R3	/* return val */
	lwi	r2, r1, PT_R2
	lwi	r1, r1, PT_R1	/* restore sp last */
	rtid	r14, 0		/* return to user (re-enables IE) */
	nop
/*
 * Thin syscall wrappers: each passes the pt_regs frame pointer
 * (current r1) as an extra argument, set up in the branch delay slot.
 */
sys_vfork:
	brid	microblaze_vfork
	addk	r5, r1, r0	/* arg 1: pt_regs (delay slot) */
sys_clone:
	brid	microblaze_clone
	addk	r7, r1, r0	/* arg 3: pt_regs (delay slot) */
sys_execve:
	brid	microblaze_execve
	addk	r8, r1, r0	/* arg 4: pt_regs (delay slot) */
sys_rt_sigreturn_wrapper:
	brid	sys_rt_sigreturn
	addk	r5, r1, r0	/* arg 1: pt_regs (delay slot) */
sys_rt_sigsuspend_wrapper:
	brid	sys_rt_sigsuspend
	addk	r7, r1, r0	/* arg 3: pt_regs (delay slot) */
/*
 * Interrupt vector table.  One absolute branch per CPU vector;
 * the debug trap entry sits at offset 0x60 (entered via brki r14, 0x60
 * — see _debug_exception above).
 * NOTE(review): placement at the hardware vector addresses is handled
 * by the linker script / early boot code, not visible in this file.
 */
	.section .init.ivt, "ax"
	.org 0x0
	brai	_reset
	brai	_user_exception
	brai	_interrupt
	brai	_break
	brai	_hw_exception_handler
	.org 0x60
	brai	_debug_exception
  540. .section .rodata,"a"
  541. #include "syscall_table.S"
  542. syscall_table_size=(.-sys_call_table)
  543. type_SYSCALL:
  544. .ascii "SYSCALL\0"
  545. type_IRQ:
  546. .ascii "IRQ\0"
  547. type_IRQ_PREEMPT:
  548. .ascii "IRQ (PREEMPTED)\0"
  549. type_SYSCALL_PREEMPT:
  550. .ascii " SYSCALL (PREEMPTED)\0"
  551. /*
  552. * Trap decoding for stack unwinder
  553. * Tuples are (start addr, end addr, string)
  554. * If return address lies on [start addr, end addr],
  555. * unwinder displays 'string'
  556. */
  557. .align 4
  558. .global microblaze_trap_handlers
  559. microblaze_trap_handlers:
  560. /* Exact matches come first */
  561. .word ret_to_user ; .word ret_to_user ; .word type_SYSCALL
  562. .word ret_from_intr; .word ret_from_intr ; .word type_IRQ
  563. /* Fuzzy matches go here */
  564. .word ret_from_intr; .word no_intr_resched; .word type_IRQ_PREEMPT
  565. .word work_pending ; .word no_work_pending; .word type_SYSCALL_PREEMPT
  566. /* End of table */
  567. .word 0 ; .word 0 ; .word 0