entry.S 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041
  1. /*
  2. * Low-level system-call handling, trap handlers and context-switching
  3. *
  4. * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
  5. * Copyright (C) 2008-2009 PetaLogix
  6. * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
  7. * Copyright (C) 2001,2002 NEC Corporation
  8. * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
  9. *
  10. * This file is subject to the terms and conditions of the GNU General
  11. * Public License. See the file COPYING in the main directory of this
  12. * archive for more details.
  13. *
  14. * Written by Miles Bader <miles@gnu.org>
  15. * Heavily modified by John Williams for Microblaze
  16. */
  17. #include <linux/sys.h>
  18. #include <linux/linkage.h>
  19. #include <asm/entry.h>
  20. #include <asm/current.h>
  21. #include <asm/processor.h>
  22. #include <asm/exceptions.h>
  23. #include <asm/asm-offsets.h>
  24. #include <asm/thread_info.h>
  25. #include <asm/page.h>
  26. #include <asm/unistd.h>
  27. #include <linux/errno.h>
  28. #include <asm/signal.h>
#undef DEBUG

#ifdef DEBUG
/* Create space for syscall counting: one 32-bit counter per syscall.
 * Slot 0 doubles as the total-syscall counter (see the DEBUG code in
 * the syscall dispatch path). */
.section .data
.global syscall_debug_table
.align 4
syscall_debug_table:
	.space	(__NR_syscalls * 4)
#endif /* DEBUG */

/* Emit a global, 4-byte-aligned entry-point label. */
#define C_ENTRY(name)	.globl name; .align 4; name
/*
 * Various ways of setting and clearing BIP in flags reg.
 * This is mucky, but necessary using microblaze version that
 * allows msr ops to write to BIP
 *
 * Two implementations are provided:
 *  - CPUs with the optional msrset/msrclr instructions: atomic MSR bit
 *    operations, no scratch register needed (results discarded into r0).
 *  - CPUs without them: read-modify-write via mfs/mts, clobbering r11.
 */
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	.macro	clear_bip
	msrclr	r0, MSR_BIP
	.endm

	.macro	set_bip
	msrset	r0, MSR_BIP
	.endm

	.macro	clear_eip
	msrclr	r0, MSR_EIP
	.endm

	.macro	set_ee
	msrset	r0, MSR_EE
	.endm

	.macro	disable_irq
	msrclr	r0, MSR_IE
	.endm

	.macro	enable_irq
	msrset	r0, MSR_IE
	.endm

	/* UMS/VMS are the "save" copies that rtbd/rted/rtid load into UM/VM
	 * on return, so these macros select the mode entered on the next
	 * return-from-exception instruction. */
	.macro	set_ums
	msrset	r0, MSR_UMS
	msrclr	r0, MSR_VMS
	.endm

	.macro	set_vms
	msrclr	r0, MSR_UMS
	msrset	r0, MSR_VMS
	.endm

	.macro	clear_ums
	msrclr	r0, MSR_UMS
	.endm

	.macro	clear_vms_ums
	msrclr	r0, MSR_VMS | MSR_UMS
	.endm
#else
	/* Fallback read-modify-write variants; each clobbers r11. */
	.macro	clear_bip
	mfs	r11, rmsr
	andi	r11, r11, ~MSR_BIP
	mts	rmsr, r11
	.endm

	.macro	set_bip
	mfs	r11, rmsr
	ori	r11, r11, MSR_BIP
	mts	rmsr, r11
	.endm

	.macro	clear_eip
	mfs	r11, rmsr
	andi	r11, r11, ~MSR_EIP
	mts	rmsr, r11
	.endm

	.macro	set_ee
	mfs	r11, rmsr
	ori	r11, r11, MSR_EE
	mts	rmsr, r11
	.endm

	.macro	disable_irq
	mfs	r11, rmsr
	andi	r11, r11, ~MSR_IE
	mts	rmsr, r11
	.endm

	.macro	enable_irq
	mfs	r11, rmsr
	ori	r11, r11, MSR_IE
	mts	rmsr, r11
	.endm

	/* NOTE(review): this variant sets VMS and clears UMS — the opposite
	 * of the MSR-instruction set_ums above (msrset UMS / msrclr VMS) and
	 * byte-for-byte identical to set_vms below. One of the two branches
	 * looks wrong; confirm intended semantics on a non-MSR-instr config. */
	.macro	set_ums
	mfs	r11, rmsr
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	.endm

	.macro	set_vms
	mfs	r11, rmsr
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	.endm

	.macro	clear_ums
	mfs	r11, rmsr
	andni	r11, r11, MSR_UMS
	mts	rmsr,r11
	.endm

	.macro	clear_vms_ums
	mfs	r11, rmsr
	andni	r11, r11, (MSR_VMS|MSR_UMS)
	mts	rmsr,r11
	.endm
#endif
/* Define how to call high-level functions. With MMU, virtual mode must be
 * enabled when calling the high-level function. Clobbers R11.
 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
 */

/* turn on virtual protected mode save
 * (set_ums + rted so the new UMS/VMS take effect at label 2) */
#define VM_ON		\
	set_ums;	\
	rted	r0, 2f;	\
	nop; \
2:

/* turn off virtual protected mode save and user mode save;
 * rted targets the physical address of label 1 since VM goes off */
#define VM_OFF		\
	clear_vms_ums;	\
	rted	r0, TOPHYS(1f);	\
	nop; \
1:

/* Save all GPRs (r2-r31) and MSR into the pt_regs frame at r1.
 * r1 (SP) and r0 are handled separately by each caller. */
#define SAVE_REGS \
	swi	r2, r1, PT_R2;	/* Save SDA */			\
	swi	r3, r1, PT_R3;					\
	swi	r4, r1, PT_R4;					\
	swi	r5, r1, PT_R5;					\
	swi	r6, r1, PT_R6;					\
	swi	r7, r1, PT_R7;					\
	swi	r8, r1, PT_R8;					\
	swi	r9, r1, PT_R9;					\
	swi	r10, r1, PT_R10;				\
	swi	r11, r1, PT_R11;	/* save clobbered regs after rval */\
	swi	r12, r1, PT_R12;				\
	swi	r13, r1, PT_R13;	/* Save SDA2 */		\
	swi	r14, r1, PT_PC;	/* PC, before IRQ/trap */	\
	swi	r15, r1, PT_R15;	/* Save LP */		\
	swi	r16, r1, PT_R16;				\
	swi	r17, r1, PT_R17;				\
	swi	r18, r1, PT_R18;	/* Save asm scratch reg */ \
	swi	r19, r1, PT_R19;				\
	swi	r20, r1, PT_R20;				\
	swi	r21, r1, PT_R21;				\
	swi	r22, r1, PT_R22;				\
	swi	r23, r1, PT_R23;				\
	swi	r24, r1, PT_R24;				\
	swi	r25, r1, PT_R25;				\
	swi	r26, r1, PT_R26;				\
	swi	r27, r1, PT_R27;				\
	swi	r28, r1, PT_R28;				\
	swi	r29, r1, PT_R29;				\
	swi	r30, r1, PT_R30;				\
	swi	r31, r1, PT_R31;	/* Save current task reg */ \
	mfs	r11, rmsr;		/* save MSR */		\
	swi	r11, r1, PT_MSR;

/* Inverse of SAVE_REGS: reload MSR first, then r2-r31 from pt_regs.
 * r1 itself is restored by the caller after this macro. */
#define RESTORE_REGS \
	lwi	r11, r1, PT_MSR; \
	mts	rmsr , r11; \
	lwi	r2, r1, PT_R2;	/* restore SDA */	\
	lwi	r3, r1, PT_R3;				\
	lwi	r4, r1, PT_R4;				\
	lwi	r5, r1, PT_R5;				\
	lwi	r6, r1, PT_R6;				\
	lwi	r7, r1, PT_R7;				\
	lwi	r8, r1, PT_R8;				\
	lwi	r9, r1, PT_R9;				\
	lwi	r10, r1, PT_R10;			\
	lwi	r11, r1, PT_R11;	/* restore clobbered regs after rval */\
	lwi	r12, r1, PT_R12;			\
	lwi	r13, r1, PT_R13;	/* restore SDA2 */	\
	lwi	r14, r1, PT_PC;	/* RESTORE_LINK PC, before IRQ/trap */\
	lwi	r15, r1, PT_R15;	/* restore LP */	\
	lwi	r16, r1, PT_R16;			\
	lwi	r17, r1, PT_R17;			\
	lwi	r18, r1, PT_R18;	/* restore asm scratch reg */ \
	lwi	r19, r1, PT_R19;			\
	lwi	r20, r1, PT_R20;			\
	lwi	r21, r1, PT_R21;			\
	lwi	r22, r1, PT_R22;			\
	lwi	r23, r1, PT_R23;			\
	lwi	r24, r1, PT_R24;			\
	lwi	r25, r1, PT_R25;			\
	lwi	r26, r1, PT_R26;			\
	lwi	r27, r1, PT_R27;			\
	lwi	r28, r1, PT_R28;			\
	lwi	r29, r1, PT_R29;			\
	lwi	r30, r1, PT_R30;			\
	lwi	r31, r1, PT_R31;	/* Restore cur task reg */

/* Switch to the kernel stack (physical addressing) and save full state.
 * On exit: r1 = physical pt_regs frame, PT_MODE = 0 for user / nonzero
 * for kernel entry, CURRENT_TASK reloaded. Clobbers r11. */
#define SAVE_STATE	\
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */	\
	/* See if already in kernel mode.*/				\
	mfs	r1, rmsr;						\
	andi	r1, r1, MSR_UMS;					\
	bnei	r1, 1f;						\
	/* Kernel-mode state save. */					\
	/* Reload kernel stack-ptr. */					\
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	/* FIXME: I can add these two lines to one */			\
	/* tophys(r1,r1); */						\
	/* addik	r1, r1, -PT_SIZE; */				\
	addik	r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \
	SAVE_REGS							\
	brid	2f;							\
	swi	r1, r1, PT_MODE; /* non-zero -> was in kernel mode */	\
1:	/* User-mode state save. */					\
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
	tophys(r1,r1);							\
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */	\
	/* MS these three instructions can be added to one */		\
	/* addik	r1, r1, THREAD_SIZE; */				\
	/* tophys(r1,r1); */						\
	/* addik	r1, r1, -PT_SIZE; */				\
	addik	r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \
	SAVE_REGS							\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	swi	r11, r1, PT_R1; /* Store user SP. */			\
	swi	r0, r1, PT_MODE; /* Was in user-mode. */		\
	/* MS: I am clearing UMS even in case when I come from kernel space */ \
	clear_ums;							\
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	.text

/*
 * User trap.
 *
 * System calls are handled here.
 *
 * Syscall protocol:
 * Syscall number in r12, args in r5-r10
 * Return value in r3
 *
 * Trap entered via brki instruction, so BIP bit is set, and interrupts
 * are masked. This is nice, means we don't have to CLI before state save
 */
C_ENTRY(_user_exception):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	addi	r14, r14, 4	/* return address is 4 byte after call */

	/* See if already in kernel mode (UMS set means we came from user). */
	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f

/* Kernel-mode state save - kernel execve */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
	tophys(r1,r1);

	addik	r1, r1, -PT_SIZE; /* Make room on the stack. */
	SAVE_REGS

	swi	r1, r1, PT_MODE; /* non-zero -> pt_regs is kernel mode */
	brid	2f;
	nop;				/* Fill delay slot */

/* User-mode state save. */
1:
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
/* calculate kernel stack pointer from task struct 8k */
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);

	addik	r1, r1, -PT_SIZE; /* Make room on the stack. */
	SAVE_REGS
	/* Preset r3/r4 slots so a restart sees clean return values. */
	swi	r0, r1, PT_R3
	swi	r0, r1, PT_R4

	swi	r0, r1, PT_MODE;	/* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PT_R1;		/* Store user SP. */
	clear_ums;
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	/* Save away the syscall number. */
	swi	r12, r1, PT_R0;
	tovirt(r1,r1)

/* where the trap should return need -8 to adjust for rtsd r15, 8*/
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid. The LP
 * register should point to the location where
 * the called function should return.  [note that MAKE_SYS_CALL uses label 1] */
	/* Step into virtual mode */
	rtbd	r0, 3f
	nop
3:
	/* Syscall tracing: enter hook only if any work flag is set. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
	lwi	r11, r11, TI_FLAGS	/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 4f

	addik	r3, r0, -ENOSYS
	swi	r3, r1, PT_R3
	brlid	r15, do_syscall_trace_enter
	addik	r5, r1, PT_R0

	# do_syscall_trace_enter returns the new syscall nr.
	addk	r12, r0, r3
	/* Reload the argument registers the tracer may have modified. */
	lwi	r5, r1, PT_R5;
	lwi	r6, r1, PT_R6;
	lwi	r7, r1, PT_R7;
	lwi	r8, r1, PT_R8;
	lwi	r9, r1, PT_R9;
	lwi	r10, r1, PT_R10;
4:
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid.
 * The LP register should point to the location where the called function
 * should return.  [note that MAKE_SYS_CALL uses label 1] */
	/* See if the system call number is valid */
	addi	r11, r12, -__NR_syscalls;
	bgei	r11,5f;
	/* Figure out which function to use for this system call. */
	/* Note Microblaze barrel shift is optional, so don't rely on it */
	add	r12, r12, r12;		/* convert num -> ptr (num * 4) */
	add	r12, r12, r12;

#ifdef DEBUG
	/* Trace syscalls and store counts to syscall_debug_table */
	/* The first syscall location stores total syscall number */
	lwi	r3, r0, syscall_debug_table
	addi	r3, r3, 1
	swi	r3, r0, syscall_debug_table
	lwi	r3, r12, syscall_debug_table
	addi	r3, r3, 1
	swi	r3, r12, syscall_debug_table
#endif

	# Find and jump into the syscall handler.
	lwi	r12, r12, sys_call_table
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addi	r15, r0, ret_from_trap-8
	bra	r12

	/* The syscall number is invalid, return an error. */
5:
	rtsd	r15, 8;		/* looks like a normal subroutine return */
	addi	r3, r0, -ENOSYS;
/* Entry point used to return from a syscall/trap */
/* We re-enable BIP bit before state restore */
C_ENTRY(ret_from_trap):
	/* Store the syscall return value (r3/r4) into pt_regs. */
	swi	r3, r1, PT_R3
	swi	r4, r1, PT_R4

	lwi	r11, r1, PT_MODE;
/* See if returning to kernel mode, if so, skip resched &c. */
	bnei	r11, 2f;
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* FIXME: Restructure all these flag checks. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 1f

	/* Syscall tracing: exit hook. */
	brlid	r15, do_syscall_trace_leave
	addik	r5, r1, PT_R0
1:
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* get thread info from current task */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	bralid	r15, schedule;	/* Call scheduler */
	nop;			/* delay slot */

	/* Maybe handle a signal */
5:	/* get thread info from current task*/
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;	/* no signals pending -> skip to restore */

	addik	r5, r1, 0;	/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 1;	/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0;	/* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
1:	set_bip;		/* Ints masked for state restore */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, PT_SIZE		/* Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state. */
2:	set_bip;		/* Ints masked for state restore */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, PT_SIZE		/* Clean up stack space. */
	tovirt(r1,r1);
6:
TRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;
  406. /* These syscalls need access to the struct pt_regs on the stack, so we
  407. implement them in assembly (they're basically all wrappers anyway). */
  408. C_ENTRY(sys_fork_wrapper):
  409. addi r5, r0, SIGCHLD /* Arg 0: flags */
  410. lwi r6, r1, PT_R1 /* Arg 1: child SP (use parent's) */
  411. addik r7, r1, 0 /* Arg 2: parent context */
  412. add r8. r0, r0 /* Arg 3: (unused) */
  413. add r9, r0, r0; /* Arg 4: (unused) */
  414. brid do_fork /* Do real work (tail-call) */
  415. add r10, r0, r0; /* Arg 5: (unused) */
/* This is the initial entry point for a new child thread, with an appropriate
   stack in place that makes it look like the child is in the middle of a
   syscall.  This function is actually `returned to' from switch_thread
   (copy_thread makes ret_from_fork the return address in each new thread's
   saved context). */
C_ENTRY(ret_from_fork):
	bralid	r15, schedule_tail; /* ...which is schedule_tail's arg */
	add	r3, r5, r0;	/* switch_thread returns the prev task */
				/* ( in the delay slot ) */
	brid	ret_from_trap;	/* Do normal trap return */
	add	r3, r0, r0;	/* Child's fork call should return 0. */
/* vfork: tail-call the C helper with pt_regs as the only argument
 * (r5 is loaded in the delay slot of brid). */
C_ENTRY(sys_vfork):
	brid	microblaze_vfork	/* Do real work (tail-call) */
	addik	r5, r1, 0
/* clone: like fork but flags (r5) and child SP (r6) come from userspace;
 * a zero child SP means "reuse the parent's stack pointer". */
C_ENTRY(sys_clone):
	bnei	r6, 1f;			/* See if child SP arg (arg 1) is 0. */
	lwi	r6, r1, PT_R1;		/* If so, use parent's stack ptr */
1:	addik	r7, r1, 0;		/* Arg 2: parent context */
	add	r8, r0, r0;		/* Arg 3: (unused) */
	add	r9, r0, r0;		/* Arg 4: (unused) */
	brid	do_fork		/* Do real work (tail-call) */
	add	r10, r0, r0;		/* Arg 5: (unused) */
/* execve: args already in r5-r7; append pt_regs as the 4th argument. */
C_ENTRY(sys_execve):
	brid	microblaze_execve;	/* Do real work (tail-call).*/
	addik	r8, r1, 0;		/* add user context as 4th arg */
/* rt_sigreturn: pass pt_regs as the only argument to the C handler. */
C_ENTRY(sys_rt_sigreturn_wrapper):
	brid	sys_rt_sigreturn	/* Do real work */
	addik	r5, r1, 0;		/* add user context as 1st arg */
/*
 * HW EXCEPTION routine start
 */
C_ENTRY(full_exception_trap):
	/* adjust exception address for privileged instruction
	 * for finding where is it */
	addik	r17, r17, -4
	SAVE_STATE /* Save registers */
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PT_PC;
	tovirt(r1,r1)
	/* FIXME this can be store directly in PT_ESR reg.
	 * I tested it but there is a fault */
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc - 8
	mfs	r6, resr		/* arg: ESR (exception status) */
	mfs	r7, rfsr;		/* save FSR (floating-point status) */
	mts	rfsr, r0;	/* Clear sticky fsr */
	/* rted enters virtual mode and jumps to the C handler;
	 * r5 (pt_regs *) is loaded in the delay slot. */
	rted	r0, full_exception
	addik	r5, r1, 0		/* parameter struct pt_regs * regs */
/*
 * Unaligned data trap.
 *
 * Unaligned data trap last on 4k page is handled here.
 *
 * Trap entered via exception, so EE bit is set, and interrupts
 * are masked. This is nice, means we don't have to CLI before state save
 *
 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
 */
C_ENTRY(unaligned_data_trap):
	/* MS: I have to save r11 value and then restore it because
	 * set_bip, clear_eip, set_ee use r11 as temp register if MSR
	 * instructions are not used. We don't need to do if MSR instructions
	 * are used and they use r0 instead of r11.
	 * I am using ENTRY_SP which should be primary used only for stack
	 * pointer saving. */
	swi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	set_bip;	/* equalize initial state for all possible entries */
	clear_eip;
	set_ee;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	SAVE_STATE		/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PT_PC;
	tovirt(r1,r1)
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r3, resr		/* ESR */
	mfs	r4, rear		/* EAR */
	/* Enter virtual mode and jump to the unaligned-access fixup;
	 * r7 (pt_regs *) is loaded in the delay slot. */
	rtbd	r0, _unaligned_data_exception
	addik	r7, r1, 0		/* parameter struct pt_regs * regs */
/*
 * Page fault traps.
 *
 * If the real exception handler (from hw_exception_handler.S) didn't find
 * the mapping for the process, then we're thrown here to handle such situation.
 *
 * Trap entered via exceptions, so EE bit is set, and interrupts
 * are masked. This is nice, means we don't have to CLI before state save
 *
 * Build a standard exception frame for TLB Access errors. All TLB exceptions
 * will bail out to this point if they can't resolve the lightweight TLB fault.
 *
 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
 * void do_page_fault(struct pt_regs *regs,
 *				unsigned long address,
 *				unsigned long error_code)
 */
/* data and instruction trap - which one occurred is resolved in fault.c */
C_ENTRY(page_fault_data_trap):
	SAVE_STATE		/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PT_PC;
	tovirt(r1,r1)
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r6, rear		/* parameter unsigned long address */
	mfs	r7, resr		/* parameter unsigned long error_code */
	rted	r0, do_page_fault
	addik	r5, r1, 0		/* parameter struct pt_regs * regs */
/* Instruction-side page fault: same as the data variant, but error_code
 * is forced to 0 so fault.c can tell the two apart. */
C_ENTRY(page_fault_instr_trap):
	SAVE_STATE		/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PT_PC;
	tovirt(r1,r1)
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r6, rear		/* parameter unsigned long address */
	ori	r7, r0, 0		/* parameter unsigned long error_code */
	rted	r0, do_page_fault
	addik	r5, r1, 0		/* parameter struct pt_regs * regs */
/* Entry point used to return from an exception. */
C_ENTRY(ret_from_exc):
	lwi	r11, r1, PT_MODE;
	bnei	r11, 2f;	/* See if returning to kernel mode, */
				/* ... if so, skip resched &c. */

	/* We're returning to user mode, so check for various conditions that
	   trigger rescheduling. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;			/* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;	/* no signals pending -> skip to restore */

	/*
	 * Handle a signal return; Pending signals should be in r18.
	 *
	 * Not all registers are saved by the normal trap/interrupt entry
	 * points (for instance, call-saved registers (because the normal
	 * C-compiler calling sequence in the kernel makes sure they're
	 * preserved), and call-clobbered registers in the case of
	 * traps), but signal handlers may want to examine or change the
	 * complete register state.  Here we save anything not saved by
	 * the normal entry sequence, so that it may be safely restored
	 * (in a possibly modified form) after do_signal returns. */
	addik	r5, r1, 0;	/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 0;	/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0;	/* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
1:	set_bip;		/* Ints masked for state restore */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	RESTORE_REGS;
	addik	r1, r1, PT_SIZE		/* Clean up stack space. */

	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
	bri	6f;
/* Return to kernel state. */
2:	set_bip;		/* Ints masked for state restore */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, PT_SIZE		/* Clean up stack space. */
	tovirt(r1,r1);
6:
EXC_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;

/*
 * HW EXCEPTION routine end
 */
/*
 * Hardware maskable interrupts.
 *
 * The stack-pointer (r1) should have already been saved to the memory
 * location PER_CPU(ENTRY_SP).
 */
C_ENTRY(_interrupt):
/* MS: we are in physical address */
/* Save registers, switch to proper stack, convert SP to virtual.*/
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	/* MS: See if already in kernel mode. */
	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f

/* Kernel-mode state save. */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	tophys(r1,r1); /* MS: I have in r1 physical address where stack is */
	/* save registers */
/* MS: Make room on the stack -> activation record */
	addik	r1, r1, -PT_SIZE;
	SAVE_REGS
	brid	2f;
	swi	r1, r1, PT_MODE; /* 0 - user mode, 1 - kernel mode */
1:
/* User-mode state save. */
 /* MS: get the saved current */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);
	/* save registers */
	addik	r1, r1, -PT_SIZE;
	SAVE_REGS
	/* calculate mode */
	swi	r0, r1, PT_MODE;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PT_R1;
	clear_ums;
2:
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tovirt(r1,r1)
	/* r15 = irq_call so do_IRQ "returns" to ret_from_irq
	 * (irq_call + 8 via rtsd r15, 8 convention). */
	addik	r15, r0, irq_call;
irq_call:rtbd	r0, do_IRQ;
	addik	r5, r1, 0;

/* MS: we are in virtual mode */
ret_from_irq:
	lwi	r11, r1, PT_MODE;
	bnei	r11, 2f;

	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS; /* MS: get flags from thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f
	bralid	r15, schedule;
	nop; /* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
	lwi	r11, r11, TI_FLAGS; /* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqid	r11, no_intr_resched
/* Handle a signal return; Pending signals should be in r18. */
	addi	r7, r0, 0; /* Arg 3: int in_syscall */
	addik	r5, r1, 0; /* Arg 1: struct pt_regs *regs */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0; /* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
no_intr_resched:
    /* Disable interrupts, we are now committed to the state restore */
	disable_irq
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS
	addik	r1, r1, PT_SIZE /* MS: Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE;
	bri	6f;
/* MS: Return to kernel state. */
2:
#ifdef CONFIG_PREEMPT
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	/* MS: get preempt_count from thread info */
	lwi	r5, r11, TI_PREEMPT_COUNT;
	bgti	r5, restore;	/* preemption disabled -> just restore */

	lwi	r5, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	beqi	r5, restore /* if zero jump over */

preempt:
	/* interrupts are off that's why I am calling preempt_schedule_irq */
	bralid	r15, preempt_schedule_irq
	nop
	/* Re-check: the schedule may have set NEED_RESCHED again. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r5, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	bnei	r5, preempt /* if non zero jump to resched */
restore:
#endif
	VM_OFF /* MS: turn off MMU */
	tophys(r1,r1)
	RESTORE_REGS
	addik	r1, r1, PT_SIZE	/* MS: Clean up stack space. */
	tovirt(r1,r1);
6:
IRQ_return: /* MS: Make global symbol for debugging */
	rtid	r14, 0
	nop
  699. /*
  700. * Debug trap for KGDB. Enter to _debug_exception by brki r16, 0x18
  701. * and call handling function with saved pt_regs
  702. */
  703. C_ENTRY(_debug_exception):
  704. /* BIP bit is set on entry, no interrupts can occur */
  705. swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
/* NOTE(review): continuation of the debug-trap (brki) handler -- the
 * ENTRY point is above this view.  MSR_UMS selects the path: kernel-mode
 * traps are routed to KGDB, user-mode traps to sw_exception (gdb). */
706. mfs r1, rmsr
707. nop	/* presumably an rmsr access hazard delay -- TODO confirm */
708. andi r1, r1, MSR_UMS	/* isolate the user-mode-save bit */
709. bnei r1, 1f	/* non-zero -> trap came from user mode */
710. /* MS: Kernel-mode state save - kgdb */
711. lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
712. /* BIP bit is set on entry, no interrupts can occur */
713. addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE;
714. SAVE_REGS;
715. /* save all regs to pt_reg structure */
716. swi r0, r1, PT_R0; /* R0 must be saved too */
717. swi r14, r1, PT_R14 /* rewrite saved R14 value */
718. swi r16, r1, PT_PC; /* PC and r16 are the same */
719. /* save special purpose registers to pt_regs */
720. mfs r11, rear;
721. swi r11, r1, PT_EAR;
722. mfs r11, resr;
723. swi r11, r1, PT_ESR;
724. mfs r11, rfsr;
725. swi r11, r1, PT_FSR;
726. /* r1 currently holds a physical address already decreased
727. * by PT_SIZE; reconstruct the original (virtual) R1 value */
728. addik r11, r1, CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR + PT_SIZE;
729. swi r11, r1, PT_R1
730. /* MS: r31 - current pointer isn't changed */
731. tovirt(r1,r1)
732. #ifdef CONFIG_KGDB
733. addi r5, r1, 0 /* pass pt_reg address as the first arg */
734. addik r15, r0, dbtrap_call; /* return address */
735. rtbd r0, microblaze_kgdb_break
736. nop;
737. #endif
738. /* MS: Place handler for brki from kernel space if KGDB is OFF.
739. * It is very unlikely that another brki instruction is called. */
740. bri 0	/* deliberate spin: no KGDB, nowhere sane to return */
741. /* MS: User-mode state save - gdb */
742. 1: lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
743. tophys(r1,r1);
744. lwi r1, r1, TS_THREAD_INFO; /* get the thread info */
745. addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */
746. tophys(r1,r1);
747. addik r1, r1, -PT_SIZE; /* Make room on the stack. */
748. SAVE_REGS;
749. swi r16, r1, PT_PC; /* Save LP */
750. swi r0, r1, PT_MODE; /* Was in user-mode. */
751. lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
752. swi r11, r1, PT_R1; /* Store user SP. */
753. lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
754. tovirt(r1,r1)
755. set_vms;
756. addik r5, r1, 0;	/* arg 1 for sw_exception: pt_regs pointer */
757. addik r15, r0, dbtrap_call;	/* C handler returns to dbtrap_call+8 */
758. dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */
759. rtbd r0, sw_exception
760. nop
761. /* MS: The first instruction for the second part of the gdb/kgdb */
762. set_bip; /* Ints masked for state restore */
763. lwi r11, r1, PT_MODE;
764. bnei r11, 2f;	/* non-zero PT_MODE -> trapped from kernel */
765. /* MS: Return to user space - gdb */
766. /* Get current task ptr into r11 */
767. lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
768. lwi r11, r11, TI_FLAGS; /* get flags in thread info */
769. andi r11, r11, _TIF_NEED_RESCHED;
770. beqi r11, 5f;	/* no resched requested -> skip schedule() */
771. /* Call the scheduler before returning from a syscall/trap. */
772. bralid r15, schedule; /* Call scheduler */
773. nop; /* delay slot */
774. /* Maybe handle a signal */
775. 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
776. lwi r11, r11, TI_FLAGS; /* get flags in thread info */
777. andi r11, r11, _TIF_SIGPENDING;
778. beqi r11, 1f; /* no signal pending -> skip do_signal */
779. addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */
780. addi r7, r0, 0; /* Arg 3: int in_syscall */
781. bralid r15, do_signal; /* Handle any signals */
782. add r6, r0, r0; /* Arg 2: sigset_t *oldset (NULL), in delay slot */
783. /* Finally, return to user state. */
784. 1: swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
785. VM_OFF;
786. tophys(r1,r1);
787. /* MS: Restore all regs */
788. RESTORE_REGS
789. addik r1, r1, PT_SIZE /* Clean up stack space */
790. lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer */
791. DBTRAP_return_user: /* MS: Make global symbol for debugging */
792. rtbd r16, 0; /* MS: Instructions to return from a debug trap */
793. nop;
794. /* MS: Return to kernel state - kgdb */
795. 2: VM_OFF;
796. tophys(r1,r1);
797. /* MS: Restore all regs */
798. RESTORE_REGS
799. lwi r14, r1, PT_R14;	/* reload regs that SAVE_REGS overwrote */
800. lwi r16, r1, PT_PC;
801. addik r1, r1, PT_SIZE; /* MS: Clean up stack space */
802. tovirt(r1,r1);
803. DBTRAP_return_kernel: /* MS: Make global symbol for debugging */
804. rtbd r16, 0; /* MS: Instructions to return from a debug trap */
805. nop;
/* _switch_to -- CPU context switch.
 * In:  r5 = previous task's thread_info (TI_CPU_CONTEXT offset applied),
 *      r6 = next task's thread_info (TI_CPU_CONTEXT / TI_TASK applied)
 *      -- assumed from the offsets used below; TODO confirm against the
 *      C prototype.
 * Out: r3 = previous CURRENT_TASK pointer (return value).
 * Saves callee/dedicated registers and MSR/EAR/ESR/FSR into the old
 * cpu_context, then loads the new context.  Note: EAR/ESR are saved but
 * only FSR and MSR are restored here.  Volatile registers are not
 * touched -- they were spilled on the stack before the call. */
806. ENTRY(_switch_to)
807. /* prepare return value */
808. addk r3, r0, CURRENT_TASK
809. /* save registers in cpu_context */
810. /* use r11 and r12, volatile registers, as temp register */
811. /* give start of cpu_context for previous process */
812. addik r11, r5, TI_CPU_CONTEXT
813. swi r1, r11, CC_R1	/* stack pointer */
814. swi r2, r11, CC_R2
815. /* skip volatile registers.
816. * they are saved on stack when we jumped to _switch_to() */
817. /* dedicated registers */
818. swi r13, r11, CC_R13
819. swi r14, r11, CC_R14
820. swi r15, r11, CC_R15	/* link register */
821. swi r16, r11, CC_R16
822. swi r17, r11, CC_R17
823. swi r18, r11, CC_R18
824. /* save non-volatile registers */
825. swi r19, r11, CC_R19
826. swi r20, r11, CC_R20
827. swi r21, r11, CC_R21
828. swi r22, r11, CC_R22
829. swi r23, r11, CC_R23
830. swi r24, r11, CC_R24
831. swi r25, r11, CC_R25
832. swi r26, r11, CC_R26
833. swi r27, r11, CC_R27
834. swi r28, r11, CC_R28
835. swi r29, r11, CC_R29
836. swi r30, r11, CC_R30
837. /* special purpose registers */
838. mfs r12, rmsr
839. swi r12, r11, CC_MSR
840. mfs r12, rear
841. swi r12, r11, CC_EAR
842. mfs r12, resr
843. swi r12, r11, CC_ESR
844. mfs r12, rfsr
845. swi r12, r11, CC_FSR
846. /* update r31, the current-give me pointer to task which will be next */
847. lwi CURRENT_TASK, r6, TI_TASK
848. /* stored it to current_save too */
849. swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)
850. /* get new process' cpu context and restore */
851. /* give me start where start context of next task */
852. addik r11, r6, TI_CPU_CONTEXT
853. /* non-volatile registers */
854. lwi r30, r11, CC_R30
855. lwi r29, r11, CC_R29
856. lwi r28, r11, CC_R28
857. lwi r27, r11, CC_R27
858. lwi r26, r11, CC_R26
859. lwi r25, r11, CC_R25
860. lwi r24, r11, CC_R24
861. lwi r23, r11, CC_R23
862. lwi r22, r11, CC_R22
863. lwi r21, r11, CC_R21
864. lwi r20, r11, CC_R20
865. lwi r19, r11, CC_R19
866. /* dedicated registers */
867. lwi r18, r11, CC_R18
868. lwi r17, r11, CC_R17
869. lwi r16, r11, CC_R16
870. lwi r15, r11, CC_R15	/* new task's link register */
871. lwi r14, r11, CC_R14
872. lwi r13, r11, CC_R13
873. /* skip volatile registers */
874. lwi r2, r11, CC_R2
875. lwi r1, r11, CC_R1	/* switch to the new task's stack */
876. /* special purpose registers */
877. lwi r12, r11, CC_FSR
878. mts rfsr, r12
879. lwi r12, r11, CC_MSR
880. mts rmsr, r12
881. rtsd r15, 8	/* resume new task after its call site */
882. nop	/* delay slot */
/* _reset -- software reset: unconditional jump to the reset vector at
 * address 0. */
883. ENTRY(_reset)
884. brai 0; /* Jump to reset vector */
/* Interrupt vector table: one branch stub per hardware vector slot.
 * Targets are physical (TOPHYS) addresses since vectors run before the
 * MMU mapping is the kernel's virtual one. */
885. /* These are compiled and loaded into high memory, then
886. * copied into place in mach_early_setup */
887. .section .init.ivt, "ax"
888. #if CONFIG_MANUAL_RESET_VECTOR
889. .org 0x0	/* reset vector */
890. brai CONFIG_MANUAL_RESET_VECTOR
891. #endif
892. .org 0x8
893. brai TOPHYS(_user_exception); /* syscall handler */
894. .org 0x10
895. brai TOPHYS(_interrupt); /* Interrupt handler */
896. .org 0x18
897. brai TOPHYS(_debug_exception); /* debug trap handler */
898. .org 0x20
899. brai TOPHYS(_hw_exception_handler); /* HW exception handler */
/* Read-only data: the syscall table, trap-type name strings, and the
 * address-range table used by the stack unwinder to label trap frames. */
900. .section .rodata,"a"
901. #include "syscall_table.S"
902. syscall_table_size=(.-sys_call_table)
903. type_SYSCALL:
904. .ascii "SYSCALL\0"
905. type_IRQ:
906. .ascii "IRQ\0"
907. type_IRQ_PREEMPT:
908. .ascii "IRQ (PREEMPTED)\0"
909. type_SYSCALL_PREEMPT:
910. .ascii " SYSCALL (PREEMPTED)\0"
911. /*
912. * Trap decoding for the stack unwinder.
913. * Tuples are (start addr, end addr, string).
914. * If the return address lies in [start addr, end addr],
915. * the unwinder displays 'string'.
916. */
917. .align 4
918. .global microblaze_trap_handlers
919. microblaze_trap_handlers:
920. /* Exact matches come first */
921. .word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL
922. .word ret_from_irq ; .word ret_from_irq ; .word type_IRQ
923. /* Fuzzy matches go here */
924. .word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
925. .word ret_from_trap; .word TRAP_return ; .word type_SYSCALL_PREEMPT
926. /* End of table; terminated by an all-zero tuple */
927. .word 0 ; .word 0 ; .word 0