entry-avr32b.S

/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * This file contains the low-level entry-points into the kernel, that is,
 * exception handlers, debug trap handlers, interrupt handlers and the
 * system call handler.
 */
#include <linux/errno.h>

#include <asm/asm.h>
#include <asm/hardirq.h>
#include <asm/irq.h>
#include <asm/ocd.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

#ifdef CONFIG_PREEMPT
# define preempt_stop		mask_interrupts
#else
# define preempt_stop
# define fault_resume_kernel	fault_restore_all
#endif

#define __MASK(x)	((1 << (x)) - 1)
#define IRQ_MASK	((__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) | \
			 (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT))
	.section .ex.text,"ax",@progbits
	.align	2
exception_vectors:
	bral	handle_critical
	.align	2
	bral	handle_critical
	.align	2
	bral	do_bus_error_write
	.align	2
	bral	do_bus_error_read
	.align	2
	bral	do_nmi_ll
	.align	2
	bral	handle_address_fault
	.align	2
	bral	handle_protection_fault
	.align	2
	bral	handle_debug
	.align	2
	bral	do_illegal_opcode_ll
	.align	2
	bral	do_illegal_opcode_ll
	.align	2
	bral	do_illegal_opcode_ll
	.align	2
	bral	do_fpe_ll
	.align	2
	bral	do_illegal_opcode_ll
	.align	2
	bral	handle_address_fault
	.align	2
	bral	handle_address_fault
	.align	2
	bral	handle_protection_fault
	.align	2
	bral	handle_protection_fault
	.align	2
	bral	do_dtlb_modified
#define tlbmiss_save	pushm	r0-r3
#define tlbmiss_restore	popm	r0-r3

	.org	0x50
	.global	itlb_miss
itlb_miss:
	tlbmiss_save
	rjmp	tlb_miss_common

	.org	0x60
dtlb_miss_read:
	tlbmiss_save
	rjmp	tlb_miss_common

	.org	0x70
dtlb_miss_write:
	tlbmiss_save

	.global	tlb_miss_common
	.align	2
tlb_miss_common:
	mfsr	r0, SYSREG_TLBEAR
	mfsr	r1, SYSREG_PTBR

	/*
	 * First level lookup: The PGD contains virtual pointers to
	 * the second-level page tables, but they may be NULL if not
	 * present.
	 */
pgtbl_lookup:
	lsr	r2, r0, PGDIR_SHIFT
	ld.w	r3, r1[r2 << 2]
	bfextu	r1, r0, PAGE_SHIFT, PGDIR_SHIFT - PAGE_SHIFT
	cp.w	r3, 0
	breq	page_table_not_present

	/* Second level lookup */
	ld.w	r2, r3[r1 << 2]
	mfsr	r0, SYSREG_TLBARLO
	bld	r2, _PAGE_BIT_PRESENT
	brcc	page_not_present

	/* Mark the page as accessed */
	sbr	r2, _PAGE_BIT_ACCESSED
	st.w	r3[r1 << 2], r2

	/* Drop software flags */
	andl	r2, _PAGE_FLAGS_HARDWARE_MASK & 0xffff
	mtsr	SYSREG_TLBELO, r2

	/* Figure out which entry we want to replace */
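	/*
	 * Descriptive note (not in the original source): TLBARLO keeps
	 * one bit per TLB entry which the hardware clears when that
	 * entry is accessed, so the clz below picks the first entry
	 * that has not been used recently and places its index in
	 * MMUCR:DRP for tlbw to replace. When every bit is clear
	 * (carry set after clz), all entries have been used, so we
	 * restart at entry 0 and re-arm TLBAR. This summary is
	 * inferred from the code; see the AVR32 Architecture manual
	 * for the exact TLBAR/DRP semantics.
	 */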
	mfsr	r1, SYSREG_MMUCR
	clz	r2, r0
	brcc	1f
	mov	r3, -1			/* All entries have been accessed, */
	mov	r2, 0			/* so start at 0 */
	mtsr	SYSREG_TLBARLO, r3	/* and reset TLBAR */

1:	bfins	r1, r2, SYSREG_DRP_OFFSET, SYSREG_DRP_SIZE
	mtsr	SYSREG_MMUCR, r1
	tlbw

	tlbmiss_restore
	rete

	/* The slow path of the TLB miss handler */
	.align	2
page_table_not_present:
	/* Do we need to synchronize with swapper_pg_dir? */
	bld	r0, 31
	brcs	sync_with_swapper_pg_dir

page_not_present:
	tlbmiss_restore
	sub	sp, 4
	stmts	--sp, r0-lr
	call	save_full_context_ex
	mfsr	r12, SYSREG_ECR
	mov	r11, sp
	call	do_page_fault
	rjmp	ret_from_exception

	.align	2
sync_with_swapper_pg_dir:
	/*
	 * If swapper_pg_dir contains a non-NULL second-level page
	 * table pointer, copy it into the current PGD. If not, we
	 * must handle it as a full-blown page fault.
	 *
	 * Jumping back to pgtbl_lookup causes an unnecessary lookup,
	 * but it is guaranteed to be a cache hit, it won't happen
	 * very often, and we absolutely do not want to sacrifice any
	 * performance in the fast path in order to improve this.
	 */
	mov	r1, lo(swapper_pg_dir)
	orh	r1, hi(swapper_pg_dir)
	ld.w	r3, r1[r2 << 2]
	cp.w	r3, 0
	breq	page_not_present
	mfsr	r1, SYSREG_PTBR
	st.w	r1[r2 << 2], r3
	rjmp	pgtbl_lookup

	/*
	 * We currently have two bytes left at this point until we
	 * crash into the system call handler...
	 *
	 * Don't worry, the assembler will let us know.
	 */


	/* --- System Call --- */
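	/*
	 * Added summary (inferred from the code below): the system
	 * call number arrives in r8 and indexes sys_call_table; the
	 * first four arguments are passed in r12-r9, the fifth in r5
	 * (moved into r8 before the call) and the sixth is pushed on
	 * the stack by the user-space stub. The return value is left
	 * in r12 and written back into the saved pt_regs before
	 * returning to the caller.
	 */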
	.org	0x100
system_call:
#ifdef CONFIG_PREEMPT
	mask_interrupts
#endif
	pushm	r12		/* r12_orig */
	stmts	--sp, r0-lr

	mfsr	r0, SYSREG_RAR_SUP
	mfsr	r1, SYSREG_RSR_SUP
#ifdef CONFIG_PREEMPT
	unmask_interrupts
#endif
	zero_fp
	stm	--sp, r0-r1

	/* check for syscall tracing */
	get_thread_info r0
	ld.w	r1, r0[TI_flags]
	bld	r1, TIF_SYSCALL_TRACE
	brcs	syscall_trace_enter

syscall_trace_cont:
	cp.w	r8, NR_syscalls
	brhs	syscall_badsys

	lddpc	lr, syscall_table_addr
	ld.w	lr, lr[r8 << 2]
	mov	r8, r5		/* 5th argument (6th is pushed by stub) */
	icall	lr

	.global	syscall_return
syscall_return:
	get_thread_info r0
	mask_interrupts		/* make sure we don't miss an interrupt
				   setting need_resched or sigpending
				   between sampling and the rets */

	/* Store the return value so that the correct value is loaded below */
	stdsp	sp[REG_R12], r12

	ld.w	r1, r0[TI_flags]
	andl	r1, _TIF_ALLWORK_MASK, COH
	brne	syscall_exit_work

syscall_exit_cont:
	popm	r8-r9
	mtsr	SYSREG_RAR_SUP, r8
	mtsr	SYSREG_RSR_SUP, r9
	ldmts	sp++, r0-lr
	sub	sp, -4		/* r12_orig */
	rets

	.align	2
syscall_table_addr:
	.long	sys_call_table

syscall_badsys:
	mov	r12, -ENOSYS
	rjmp	syscall_return

	.global	ret_from_fork
ret_from_fork:
	call	schedule_tail

	/* check for syscall tracing */
	get_thread_info r0
	ld.w	r1, r0[TI_flags]
	andl	r1, _TIF_ALLWORK_MASK, COH
	brne	syscall_exit_work
	rjmp	syscall_exit_cont

syscall_trace_enter:
	pushm	r8-r12
	call	syscall_trace
	popm	r8-r12
	rjmp	syscall_trace_cont

syscall_exit_work:
	bld	r1, TIF_SYSCALL_TRACE
	brcc	1f
	unmask_interrupts
	call	syscall_trace
	mask_interrupts
	ld.w	r1, r0[TI_flags]

1:	bld	r1, TIF_NEED_RESCHED
	brcc	2f
	unmask_interrupts
	call	schedule
	mask_interrupts
	ld.w	r1, r0[TI_flags]
	rjmp	1b

2:	mov	r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NOTIFY_RESUME
	tst	r1, r2
	breq	3f
	unmask_interrupts
	mov	r12, sp
	mov	r11, r0
	call	do_notify_resume
	mask_interrupts
	ld.w	r1, r0[TI_flags]
	rjmp	1b

3:	bld	r1, TIF_BREAKPOINT
	brcc	syscall_exit_cont
	rjmp	enter_monitor_mode

	/* This function expects to find offending PC in SYSREG_RAR_EX */
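	/*
	 * Added summary (inferred from the code below): this helper
	 * completes the pt_regs frame started by the caller by pushing
	 * the faulting PC and SR, then re-enables exceptions and
	 * returns. If the fault came from a privileged mode, the saved
	 * SP slot is patched to point past the frame just built, and
	 * if the fault hit the debug trampoline, the real return
	 * address and status are taken from thread_info instead (see
	 * the comment at label 3 below).
	 */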
	.type	save_full_context_ex, @function
	.align	2
save_full_context_ex:
	mfsr	r11, SYSREG_RAR_EX
	sub	r9, pc, . - debug_trampoline
	mfsr	r8, SYSREG_RSR_EX
	cp.w	r9, r11
	breq	3f
	mov	r12, r8
	andh	r8, (MODE_MASK >> 16), COH
	brne	2f

1:	pushm	r11, r12	/* PC and SR */
	unmask_exceptions
	ret	r12

2:	sub	r10, sp, -(FRAME_SIZE_FULL - REG_LR)
	stdsp	sp[4], r10	/* replace saved SP */
	rjmp	1b

	/*
	 * The debug handler set up a trampoline to make us
	 * automatically enter monitor mode upon return, but since
	 * we're saving the full context, we must assume that the
	 * exception handler might want to alter the return address
	 * and/or status register. So we need to restore the original
	 * context and enter monitor mode manually after the exception
	 * has been handled.
	 */
3:	get_thread_info r8
	ld.w	r11, r8[TI_rar_saved]
	ld.w	r12, r8[TI_rsr_saved]
	rjmp	1b
	.size	save_full_context_ex, . - save_full_context_ex

	/* Low-level exception handlers */
handle_critical:
	/*
	 * AT32AP700x errata:
	 *
	 * After a Java stack overflow or underflow trap, any CPU
	 * memory access may cause erratic behavior. This will happen
	 * when the four least significant bits of the JOSP system
	 * register contain any value between 9 and 15 (inclusive).
	 *
	 * Possible workarounds:
	 *   - Don't use the Java Extension Module
	 *   - Ensure that the stack overflow and underflow trap
	 *     handlers do not do any memory access or trigger any
	 *     exceptions before the overflow/underflow condition is
	 *     cleared (by incrementing or decrementing the JOSP)
	 *   - Make sure that JOSP does not contain any problematic
	 *     value before doing any exception or interrupt
	 *     processing.
	 *   - Set up a critical exception handler which writes a
	 *     known-to-be-safe value, e.g. 4, to JOSP before doing
	 *     any further processing.
	 *
	 * We'll use the last workaround for now since we cannot
	 * guarantee that user space processes don't use Java mode.
	 * Non-well-behaving userland will be terminated with extreme
	 * prejudice.
	 */
#ifdef CONFIG_CPU_AT32AP700X
	/*
	 * There's a chance we can't touch memory, so temporarily
	 * borrow PTBR to save the stack pointer while we fix things
	 * up...
	 */
	mtsr	SYSREG_PTBR, sp
	mov	sp, 4
	mtsr	SYSREG_JOSP, sp
	mfsr	sp, SYSREG_PTBR
	sub	pc, -2
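	/*
	 * Note added for clarity: "sub pc, -2" is in effect a branch
	 * to the next instruction. It is used throughout this file
	 * after writes to system registers, apparently to flush the
	 * pipeline so the new SR/JOSP value takes effect before the
	 * following instructions execute (inferred from usage; see the
	 * AVR32 Architecture manual for the authoritative rule).
	 */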
	/* Push most of pt_regs on stack. We'll do the rest later */
	sub	sp, 4
	pushm	r0-r12

	/* PTBR mirrors current_thread_info()->task->active_mm->pgd */
	get_thread_info	r0
	ld.w	r1, r0[TI_task]
	ld.w	r2, r1[TSK_active_mm]
	ld.w	r3, r2[MM_pgd]
	mtsr	SYSREG_PTBR, r3
#else
	sub	sp, 4
	pushm	r0-r12
#endif
	sub	r0, sp, -(14 * 4)
	mov	r1, lr
	mfsr	r2, SYSREG_RAR_EX
	mfsr	r3, SYSREG_RSR_EX
	pushm	r0-r3

	mfsr	r12, SYSREG_ECR
	mov	r11, sp
	call	do_critical_exception

	/* We should never get here... */
bad_return:
	sub	r12, pc, (. - 1f)
	lddpc	pc, 2f

	.align	2
1:	.asciz	"Return from critical exception!"
2:	.long	panic

	.align	1
do_bus_error_write:
	sub	sp, 4
	stmts	--sp, r0-lr
	call	save_full_context_ex
	mov	r11, 1
	rjmp	1f

do_bus_error_read:
	sub	sp, 4
	stmts	--sp, r0-lr
	call	save_full_context_ex
	mov	r11, 0

1:	mfsr	r12, SYSREG_BEAR
	mov	r10, sp
	call	do_bus_error
	rjmp	ret_from_exception

	.align	1
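	/*
	 * Added summary (inferred from the code): the NMI handler
	 * builds its own pt_regs frame from RAR_NMI/RSR_NMI instead of
	 * going through save_full_context_ex. r0 holds the mode field
	 * of the interrupted context (and appears to survive the call
	 * to do_nmi); it selects the frame fixup for privileged
	 * contexts on entry and the matching restore path (label 3) on
	 * the way out.
	 */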
do_nmi_ll:
	sub	sp, 4
	stmts	--sp, r0-lr
	mfsr	r9, SYSREG_RSR_NMI
	mfsr	r8, SYSREG_RAR_NMI
	bfextu	r0, r9, MODE_SHIFT, 3
	brne	2f

1:	pushm	r8, r9		/* PC and SR */
	mfsr	r12, SYSREG_ECR
	mov	r11, sp
	call	do_nmi
	popm	r8-r9
	mtsr	SYSREG_RAR_NMI, r8
	tst	r0, r0
	mtsr	SYSREG_RSR_NMI, r9
	brne	3f

	ldmts	sp++, r0-lr
	sub	sp, -4		/* skip r12_orig */
	rete

2:	sub	r10, sp, -(FRAME_SIZE_FULL - REG_LR)
	stdsp	sp[4], r10	/* replace saved SP */
	rjmp	1b

3:	popm	lr
	sub	sp, -4		/* skip sp */
	popm	r0-r12
	sub	sp, -4		/* skip r12_orig */
	rete

handle_address_fault:
	sub	sp, 4
	stmts	--sp, r0-lr
	call	save_full_context_ex
	mfsr	r12, SYSREG_ECR
	mov	r11, sp
	call	do_address_exception
	rjmp	ret_from_exception

handle_protection_fault:
	sub	sp, 4
	stmts	--sp, r0-lr
	call	save_full_context_ex
	mfsr	r12, SYSREG_ECR
	mov	r11, sp
	call	do_page_fault
	rjmp	ret_from_exception

	.align	1
do_illegal_opcode_ll:
	sub	sp, 4
	stmts	--sp, r0-lr
	call	save_full_context_ex
	mfsr	r12, SYSREG_ECR
	mov	r11, sp
	call	do_illegal_opcode
	rjmp	ret_from_exception

do_dtlb_modified:
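	/*
	 * Added summary (inferred from the code): a write hit a TLB
	 * entry whose dirty bit was clear. Walk the page table for the
	 * faulting address, set _PAGE_BIT_DIRTY in the PTE, and rewrite
	 * the TLB entry with the updated flags so the write can
	 * proceed.
	 */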
	pushm	r0-r3

	mfsr	r1, SYSREG_TLBEAR
	mfsr	r0, SYSREG_PTBR
	lsr	r2, r1, PGDIR_SHIFT
	ld.w	r0, r0[r2 << 2]
	lsl	r1, (32 - PGDIR_SHIFT)
	lsr	r1, (32 - PGDIR_SHIFT) + PAGE_SHIFT

	/* Translate to virtual address in P1 */
	andl	r0, 0xf000
	sbr	r0, 31
	add	r2, r0, r1 << 2
	ld.w	r3, r2[0]
	sbr	r3, _PAGE_BIT_DIRTY
	mov	r0, r3
	st.w	r2[0], r3

	/* The page table is up-to-date. Update the TLB entry as well */
	andl	r0, lo(_PAGE_FLAGS_HARDWARE_MASK)
	mtsr	SYSREG_TLBELO, r0

	/* MMUCR[DRP] is updated automatically, so let's go... */
	tlbw

	popm	r0-r3
	rete

do_fpe_ll:
	sub	sp, 4
	stmts	--sp, r0-lr
	call	save_full_context_ex
	unmask_interrupts
	mov	r12, 26
	mov	r11, sp
	call	do_fpe
	rjmp	ret_from_exception

ret_from_exception:
	mask_interrupts
	lddsp	r4, sp[REG_SR]

	andh	r4, (MODE_MASK >> 16), COH
	brne	fault_resume_kernel

	get_thread_info	r0
	ld.w	r1, r0[TI_flags]
	andl	r1, _TIF_WORK_MASK, COH
	brne	fault_exit_work

fault_resume_user:
	popm	r8-r9
	mask_exceptions
	mtsr	SYSREG_RAR_EX, r8
	mtsr	SYSREG_RSR_EX, r9
	ldmts	sp++, r0-lr
	sub	sp, -4
	rete

fault_resume_kernel:
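	/*
	 * Added note (inferred from the code): with CONFIG_PREEMPT,
	 * kernel preemption is only attempted when preempt_count is
	 * zero, TIF_NEED_RESCHED is set and the interrupted context
	 * did not have interrupts globally masked (GM clear in the
	 * saved SR); otherwise we fall straight through to the
	 * register restore.
	 */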
#ifdef CONFIG_PREEMPT
	get_thread_info	r0
	ld.w	r2, r0[TI_preempt_count]
	cp.w	r2, 0
	brne	1f
	ld.w	r1, r0[TI_flags]
	bld	r1, TIF_NEED_RESCHED
	brcc	1f
	lddsp	r4, sp[REG_SR]
	bld	r4, SYSREG_GM_OFFSET
	brcs	1f
	call	preempt_schedule_irq
1:
#endif

	popm	r8-r9
	mask_exceptions
	mfsr	r1, SYSREG_SR
	mtsr	SYSREG_RAR_EX, r8
	mtsr	SYSREG_RSR_EX, r9
	popm	lr
	sub	sp, -4		/* ignore SP */
	popm	r0-r12
	sub	sp, -4		/* ignore r12_orig */
	rete

irq_exit_work:
	/* Switch to exception mode so that we can share the same code. */
	mfsr	r8, SYSREG_SR
	cbr	r8, SYSREG_M0_OFFSET
	orh	r8, hi(SYSREG_BIT(M1) | SYSREG_BIT(M2))
	mtsr	SYSREG_SR, r8
	sub	pc, -2
	get_thread_info r0
	ld.w	r1, r0[TI_flags]

fault_exit_work:
	bld	r1, TIF_NEED_RESCHED
	brcc	1f
	unmask_interrupts
	call	schedule
	mask_interrupts
	ld.w	r1, r0[TI_flags]
	rjmp	fault_exit_work

1:	mov	r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
	tst	r1, r2
	breq	2f
	unmask_interrupts
	mov	r12, sp
	mov	r11, r0
	call	do_notify_resume
	mask_interrupts
	ld.w	r1, r0[TI_flags]
	rjmp	fault_exit_work

2:	bld	r1, TIF_BREAKPOINT
	brcc	fault_resume_user
	rjmp	enter_monitor_mode

	.section .kprobes.text, "ax", @progbits
	.type	handle_debug, @function
handle_debug:
	sub	sp, 4		/* r12_orig */
	stmts	--sp, r0-lr
	mfsr	r8, SYSREG_RAR_DBG
	mfsr	r9, SYSREG_RSR_DBG
	unmask_exceptions
	pushm	r8-r9
	bfextu	r9, r9, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
	brne	debug_fixup_regs

.Ldebug_fixup_cont:
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
#endif
	mov	r12, sp
	call	do_debug
	mov	sp, r12

	lddsp	r2, sp[REG_SR]
	bfextu	r3, r2, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
	brne	debug_resume_kernel

	get_thread_info r0
	ld.w	r1, r0[TI_flags]
	mov	r2, _TIF_DBGWORK_MASK
	tst	r1, r2
	brne	debug_exit_work
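	/*
	 * Added note (inferred from the code): if the task is being
	 * single-stepped, re-arm the single-step bit in the OCD
	 * Development Control register before returning to it.
	 */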
	bld	r1, TIF_SINGLE_STEP
	brcc	1f
	mfdr	r4, OCD_DC
	sbr	r4, OCD_DC_SS_BIT
	mtdr	OCD_DC, r4

1:	popm	r10,r11
	mask_exceptions
	mtsr	SYSREG_RSR_DBG, r11
	mtsr	SYSREG_RAR_DBG, r10
#ifdef CONFIG_TRACE_IRQFLAGS
	bld	r11, SYSREG_GM_OFFSET
	brcc	1f
	call	trace_hardirqs_on
1:
#endif
	ldmts	sp++, r0-lr
	sub	sp, -4
	retd
	.size	handle_debug, . - handle_debug

	/* Mode of the trapped context is in r9 */
	.type	debug_fixup_regs, @function
debug_fixup_regs:
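	/*
	 * Added note (inferred from the code): LR appears to be banked
	 * per execution mode on this core, so SR is temporarily
	 * switched to the trapped context's mode in order to store
	 * that mode's LR into the frame; the saved SP is then
	 * reconstructed as the value it had before this frame was
	 * pushed.
	 */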
	mfsr	r8, SYSREG_SR
	mov	r10, r8
	bfins	r8, r9, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
	mtsr	SYSREG_SR, r8
	sub	pc, -2
	stdsp	sp[REG_LR], lr
	mtsr	SYSREG_SR, r10
	sub	pc, -2
	sub	r8, sp, -FRAME_SIZE_FULL
	stdsp	sp[REG_SP], r8
	rjmp	.Ldebug_fixup_cont
	.size	debug_fixup_regs, . - debug_fixup_regs

	.type	debug_resume_kernel, @function
debug_resume_kernel:
	mask_exceptions
	popm	r10, r11
	mtsr	SYSREG_RAR_DBG, r10
	mtsr	SYSREG_RSR_DBG, r11
#ifdef CONFIG_TRACE_IRQFLAGS
	bld	r11, SYSREG_GM_OFFSET
	brcc	1f
	call	trace_hardirqs_on
1:
#endif
	mfsr	r2, SYSREG_SR
	mov	r1, r2
	bfins	r2, r3, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
	mtsr	SYSREG_SR, r2
	sub	pc, -2
	popm	lr
	mtsr	SYSREG_SR, r1
	sub	pc, -2
	sub	sp, -4		/* skip SP */
	popm	r0-r12
	sub	sp, -4
	retd
	.size	debug_resume_kernel, . - debug_resume_kernel

	.type	debug_exit_work, @function
debug_exit_work:
	/*
	 * We must return from Monitor Mode using a retd, and we must
	 * not schedule since that involves the D bit in SR getting
	 * cleared by something other than the debug hardware. This
	 * may cause undefined behaviour according to the Architecture
	 * manual.
	 *
	 * So we fix up the return address and status and return to a
	 * stub below in Exception mode. From there, we can follow the
	 * normal exception return path.
	 *
	 * The real return address and status registers are stored on
	 * the stack in the way the exception return path understands,
	 * so no need to fix anything up there.
	 */
	sub	r8, pc, . - fault_exit_work
	mtsr	SYSREG_RAR_DBG, r8
	mov	r9, 0
	orh	r9, hi(SR_EM | SR_GM | MODE_EXCEPTION)
	mtsr	SYSREG_RSR_DBG, r9
	sub	pc, -2
	retd
	.size	debug_exit_work, . - debug_exit_work

	.set	rsr_int0,	SYSREG_RSR_INT0
	.set	rsr_int1,	SYSREG_RSR_INT1
	.set	rsr_int2,	SYSREG_RSR_INT2
	.set	rsr_int3,	SYSREG_RSR_INT3
	.set	rar_int0,	SYSREG_RAR_INT0
	.set	rar_int1,	SYSREG_RAR_INT1
	.set	rar_int2,	SYSREG_RAR_INT2
	.set	rar_int3,	SYSREG_RAR_INT3
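	/*
	 * Added summary (inferred from the macro below): IRQ_LEVEL
	 * expands to one entry point per interrupt priority level
	 * (irq_level0..irq_level3). Each one saves a pt_regs frame,
	 * calls do_IRQ with the level number, and on the way out
	 * handles pending work for user-mode returns, the
	 * cpu_idle_skip_sleep redirection for an interrupted sleeping
	 * idle loop, and, with CONFIG_PREEMPT, kernel preemption.
	 */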
	.macro	IRQ_LEVEL level
	.type	irq_level\level, @function
irq_level\level:
	sub	sp, 4		/* r12_orig */
	stmts	--sp,r0-lr
	mfsr	r8, rar_int\level
	mfsr	r9, rsr_int\level

#ifdef CONFIG_PREEMPT
	sub	r11, pc, (. - system_call)
	cp.w	r11, r8
	breq	4f
#endif

	pushm	r8-r9

	mov	r11, sp
	mov	r12, \level
	call	do_IRQ

	lddsp	r4, sp[REG_SR]
	bfextu	r4, r4, SYSREG_M0_OFFSET, 3
	cp.w	r4, MODE_SUPERVISOR >> SYSREG_M0_OFFSET
	breq	2f
	cp.w	r4, MODE_USER >> SYSREG_M0_OFFSET
#ifdef CONFIG_PREEMPT
	brne	3f
#else
	brne	1f
#endif

	get_thread_info	r0
	ld.w	r1, r0[TI_flags]
	andl	r1, _TIF_WORK_MASK, COH
	brne	irq_exit_work

1:
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_on
#endif
	popm	r8-r9
	mtsr	rar_int\level, r8
	mtsr	rsr_int\level, r9
	ldmts	sp++,r0-lr
	sub	sp, -4		/* ignore r12_orig */
	rete

#ifdef CONFIG_PREEMPT
4:	mask_interrupts
	mfsr	r8, rsr_int\level
	sbr	r8, 16
	mtsr	rsr_int\level, r8
	ldmts	sp++, r0-lr
	sub	sp, -4		/* ignore r12_orig */
	rete
#endif

2:	get_thread_info	r0
	ld.w	r1, r0[TI_flags]
	bld	r1, TIF_CPU_GOING_TO_SLEEP
#ifdef CONFIG_PREEMPT
	brcc	3f
#else
	brcc	1b
#endif
	sub	r1, pc, . - cpu_idle_skip_sleep
	stdsp	sp[REG_PC], r1

#ifdef CONFIG_PREEMPT
3:	get_thread_info r0
	ld.w	r2, r0[TI_preempt_count]
	cp.w	r2, 0
	brne	1b
	ld.w	r1, r0[TI_flags]
	bld	r1, TIF_NEED_RESCHED
	brcc	1b
	lddsp	r4, sp[REG_SR]
	bld	r4, SYSREG_GM_OFFSET
	brcs	1b
	call	preempt_schedule_irq
#endif
	rjmp	1b
	.endm

	.section .irq.text,"ax",@progbits

	.global	irq_level0
	.global	irq_level1
	.global	irq_level2
	.global	irq_level3
	IRQ_LEVEL 0
	IRQ_LEVEL 1
	IRQ_LEVEL 2
	IRQ_LEVEL 3

	.section .kprobes.text, "ax", @progbits
	.type	enter_monitor_mode, @function
enter_monitor_mode:
	/*
	 * We need to enter monitor mode to do a single step. The
	 * monitor code will alter the return address so that we
	 * return directly to the user instead of returning here.
	 */
	breakpoint
	rjmp	breakpoint_failed
	.size	enter_monitor_mode, . - enter_monitor_mode

	.type	debug_trampoline, @function
	.global	debug_trampoline
debug_trampoline:
	/*
	 * Save the registers on the stack so that the monitor code
	 * can find them easily.
	 */
	sub	sp, 4		/* r12_orig */
	stmts	--sp, r0-lr
	get_thread_info	r0
	ld.w	r8, r0[TI_rar_saved]
	ld.w	r9, r0[TI_rsr_saved]
	pushm	r8-r9

	/*
	 * The monitor code will alter the return address so we don't
	 * return here.
	 */
	breakpoint
	rjmp	breakpoint_failed
	.size	debug_trampoline, . - debug_trampoline

	.type	breakpoint_failed, @function
breakpoint_failed:
	/*
	 * Something went wrong. Perhaps the debug hardware isn't
	 * enabled?
	 */
	lda.w	r12, msg_breakpoint_failed
	mov	r11, sp
	mov	r10, 9		/* SIGKILL */
	call	die
1:	rjmp	1b

msg_breakpoint_failed:
	.asciz	"Failed to enter Debug Mode"