genex.S

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2001 MIPS Technologies, Inc.
 * Copyright (C) 2002, 2007 Maciej W. Rozycki
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/war.h>
#include <asm/page.h>
#include <asm/thread_info.h>
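/*
 * PANIC_PIC() below loads a0 with the address of the message string
 * emitted by TEXT() (label 8) and jumps to panic() through AT, keeping
 * the code position independent; the branch-to-self at label 9 is only
 * reached if panic() ever returns.
 */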
#define PANIC_PIC(msg)                                  \
                .set    push;                           \
                .set    reorder;                        \
                PTR_LA  a0,8f;                          \
                .set    noat;                           \
                PTR_LA  AT, panic;                      \
                jr      AT;                             \
9:              b       9b;                             \
                .set    pop;                            \
                TEXT(msg)
        __INIT

NESTED(except_vec0_generic, 0, sp)
        PANIC_PIC("Exception vector 0 called")
        END(except_vec0_generic)

NESTED(except_vec1_generic, 0, sp)
        PANIC_PIC("Exception vector 1 called")
        END(except_vec1_generic)
/*
 * General exception vector for all other CPUs.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
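/*
 * Cause.ExcCode occupies bits 6..2, so masking CP0_CAUSE with 0x7c
 * yields ExcCode * 4, which indexes the exception_handlers[] pointer
 * table directly on 32-bit kernels; the dsll below doubles it again
 * for the 8-byte pointers of a 64-bit kernel.
 */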
NESTED(except_vec3_generic, 0, sp)
        .set    push
        .set    noat
#if R5432_CP0_INTERRUPT_WAR
        mfc0    k0, CP0_INDEX
#endif
        mfc0    k1, CP0_CAUSE
        andi    k1, k1, 0x7c
#ifdef CONFIG_64BIT
        dsll    k1, k1, 1
#endif
        PTR_L   k0, exception_handlers(k1)
        jr      k0
        .set    pop
        END(except_vec3_generic)
/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this, it has to be at most 256 (as a special
 * exception) bytes to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
        .set    push
        .set    mips3
        .set    noat
        mfc0    k1, CP0_CAUSE
        li      k0, 31<<2
        andi    k1, k1, 0x7c
        .set    push
        .set    noreorder
        .set    nomacro
        beq     k1, k0, handle_vced
         li     k0, 14<<2
        beq     k1, k0, handle_vcei
#ifdef CONFIG_64BIT
         dsll   k1, k1, 1
#endif
        .set    pop
        PTR_L   k0, exception_handlers(k1)
        jr      k0

        /*
         * Big shit, we now may have two dirty primary cache lines for the same
         * physical address.  We can safely invalidate the line pointed to by
         * c0_badvaddr because after return from this exception handler the
         * load / store will be re-executed.
         */
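        /*
         * ExcCode 31 is VCED (virtual coherency exception, data) and
         * ExcCode 14 is VCEI (instruction), hence the 31<<2 / 14<<2
         * comparisons above.
         */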
handle_vced:
        MFC0    k0, CP0_BADVADDR
        li      k1, -4                                  # Is this ...
        and     k0, k1                                  # ... really needed?
        mtc0    zero, CP0_TAGLO
        cache   Index_Store_Tag_D, (k0)
        cache   Hit_Writeback_Inv_SD, (k0)
#ifdef CONFIG_PROC_FS
        PTR_LA  k0, vced_count
        lw      k1, (k0)
        addiu   k1, 1
        sw      k1, (k0)
#endif
        eret

handle_vcei:
        MFC0    k0, CP0_BADVADDR
        cache   Hit_Writeback_Inv_SD, (k0)              # also cleans pi
#ifdef CONFIG_PROC_FS
        PTR_LA  k0, vcei_count
        lw      k1, (k0)
        addiu   k1, 1
        sw      k1, (k0)
#endif
        eret
        .set    pop
        END(except_vec3_r4000)
        __FINIT

        .align  5       /* 32 byte rollback region */
LEAF(r4k_wait)
        .set    push
        .set    noreorder
        /* start of rollback region */
        LONG_L  t0, TI_FLAGS($28)
        nop
        andi    t0, _TIF_NEED_RESCHED
        bnez    t0, 1f
         nop
         nop
         nop
        .set    mips3
        wait
        /* end of rollback region (the region size must be a power of two) */
        .set    pop
1:
        jr      ra
        END(r4k_wait)
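/*
 * If an interrupt is taken while EPC still points into the 32-byte
 * aligned region above (between the TIF_NEED_RESCHED test and the
 * WAIT), the prologue below rounds EPC down to the start of r4k_wait,
 * so the flag is re-tested before the CPU is put back to sleep.
 */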
        .macro  BUILD_ROLLBACK_PROLOGUE handler
        FEXPORT(rollback_\handler)
        .set    push
        .set    noat
        MFC0    k0, CP0_EPC
        PTR_LA  k1, r4k_wait
        ori     k0, 0x1f        /* 32 byte rollback region */
        xori    k0, 0x1f
        bne     k0, k1, 9f
        MTC0    k0, CP0_EPC
9:
        .set    pop
        .endm
        .align  5
BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
#ifdef CONFIG_TRACE_IRQFLAGS
        /*
         * Check to see if the interrupted code has just disabled
         * interrupts and ignore this interrupt for now if so.
         *
         * local_irq_disable() disables interrupts and then calls
         * trace_hardirqs_off() to track the state. If an interrupt is taken
         * after interrupts are disabled but before the state is updated
         * it will appear to restore_all that it is incorrectly returning with
         * interrupts disabled.
         */
        .set    push
        .set    noat
        mfc0    k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
        and     k0, ST0_IEP
        bnez    k0, 1f

        mfc0    k0, CP0_EPC
        .set    noreorder
        j       k0
        rfe
#else
        and     k0, ST0_IE
        bnez    k0, 1f

        eret
#endif
1:
        .set    pop
#endif
        SAVE_ALL
        CLI
        TRACE_IRQS_OFF
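        /*
         * Save the previous pt_regs pointer from thread_info and publish
         * the new frame; s0 is callee-saved and is written back to
         * TI_REGS by ret_from_irq, so nested interrupt frames unwind
         * cleanly.
         */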
        LONG_L  s0, TI_REGS($28)
        LONG_S  sp, TI_REGS($28)
        PTR_LA  ra, ret_from_irq
        j       plat_irq_dispatch
        END(handle_int)
        __INIT

/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces the
 * interrupt processing overhead. The jump instruction will be replaced
 * at initialization time.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:      j       1b                      /* Dummy, will be replaced */
        END(except_vec4)
/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
        j       ejtag_debug_handler
        END(except_vec_ejtag_debug)
        __FINIT

/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler.
 */
BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
        SAVE_SOME
        SAVE_AT
        .set    push
        .set    noreorder
#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * To keep from blindly blocking *all* interrupts
         * during service by the SMTC kernel, we also want to
         * pass the IM value to be cleared.
         */
        FEXPORT(except_vec_vi_mori)
        ori     a0, $0, 0
#endif /* CONFIG_MIPS_MT_SMTC */
        FEXPORT(except_vec_vi_lui)
        lui     v0, 0                   /* Patched */
        j       except_vec_vi_handler
        FEXPORT(except_vec_vi_ori)
         ori    v0, 0                   /* Patched */
        .set    pop
        END(except_vec_vi)
EXPORT(except_vec_vi_end)
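/*
 * set_vi_handler() in traps.c copies the prototype between except_vec_vi
 * and except_vec_vi_end into the vectored-interrupt slot and rewrites the
 * lui/ori pair at the FEXPORTed labels with the real handler address, so
 * the jump above reaches except_vec_vi_handler with that address in v0.
 */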
/*
 * Common Vectored Interrupt code
 * Complete the register saves and invoke the handler which is passed in $v0
 */
NESTED(except_vec_vi_handler, 0, sp)
        SAVE_TEMP
        SAVE_STATIC
#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * SMTC has an interesting problem that interrupts are level-triggered,
         * and the CLI macro will clear EXL, potentially causing a duplicate
         * interrupt service invocation. So we need to clear the associated
         * IM bit of Status prior to doing CLI, and restore it after the
         * service routine has been invoked - we must assume that the
         * service routine will have cleared the state, and any active
         * level represents a new or otherwise unserviced event...
         */
        mfc0    t1, CP0_STATUS
        and     t0, a0, t1
#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
        mfc0    t2, CP0_TCCONTEXT
        or      t2, t0, t2
        mtc0    t2, CP0_TCCONTEXT
#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
        xor     t1, t1, t0
        mtc0    t1, CP0_STATUS
        _ehb
#endif /* CONFIG_MIPS_MT_SMTC */
        CLI
#ifdef CONFIG_TRACE_IRQFLAGS
        move    s0, v0
#ifdef CONFIG_MIPS_MT_SMTC
        move    s1, a0
#endif
        TRACE_IRQS_OFF
#ifdef CONFIG_MIPS_MT_SMTC
        move    a0, s1
#endif
        move    v0, s0
#endif

        LONG_L  s0, TI_REGS($28)
        LONG_S  sp, TI_REGS($28)
        PTR_LA  ra, ret_from_irq
        jr      v0
        END(except_vec_vi_handler)
/*
 * EJTAG debug exception handler.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
        .set    push
        .set    noat
        MTC0    k0, CP0_DESAVE
        mfc0    k0, CP0_DEBUG
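        /*
         * Debug.DBp (bit 1) is set when the exception was raised by an
         * SDBBP instruction; the shift below moves it into the sign bit,
         * so the bgez sends every other debug exception straight back.
         */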
        sll     k0, k0, 30      # Check for SDBBP.
        bgez    k0, ejtag_return

        PTR_LA  k0, ejtag_debug_buffer
        LONG_S  k1, 0(k0)
        SAVE_ALL
        move    a0, sp
        jal     ejtag_exception_handler
        RESTORE_ALL
        PTR_LA  k0, ejtag_debug_buffer
        LONG_L  k1, 0(k0)

ejtag_return:
        MFC0    k0, CP0_DESAVE
        .set    mips32
        deret
        .set    pop
        END(ejtag_debug_handler)
/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler.
 */
        .data
EXPORT(ejtag_debug_buffer)
        .fill   LONGSIZE
        .previous
        __INIT

/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
        j       nmi_handler
        END(except_vec_nmi)
        __FINIT

NESTED(nmi_handler, PT_SIZE, sp)
        .set    push
        .set    noat
        SAVE_ALL
        move    a0, sp
        jal     nmi_exception_handler
        RESTORE_ALL
        .set    mips3
        eret
        .set    pop
        END(nmi_handler)
        .macro  __build_clear_none
        .endm

        .macro  __build_clear_sti
        TRACE_IRQS_ON
        STI
        .endm

        .macro  __build_clear_cli
        CLI
        TRACE_IRQS_OFF
        .endm
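        /*
         * The FPE variant below also clears the six FCSR Cause bits
         * (bits 17..12) before re-enabling interrupts, so the recorded
         * FP exception condition cannot immediately re-raise the trap.
         */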
        .macro  __build_clear_fpe
        .set    push
        /* gas fails to assemble cfc1 for some archs (octeon).*/ \
        .set    mips1
        cfc1    a1, fcr31
        li      a2, ~(0x3f << 12)
        and     a2, a1
        ctc1    a2, fcr31
        .set    pop
        TRACE_IRQS_ON
        STI
        .endm

        .macro  __build_clear_ade
        MFC0    t0, CP0_BADVADDR
        PTR_S   t0, PT_BVADDR(sp)
        KMODE
        .endm

        .macro  __BUILD_silent exception
        .endm
        /* Gas tries to parse the PRINT argument as a string containing
           string escapes and emits bogus warnings if it believes it
           recognizes an unknown escape code.  So make the arguments
           start with an n and gas will believe \n is ok ... */

        .macro  __BUILD_verbose nexception
        LONG_L  a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
        PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
        PRINT("Got \nexception at %016lx\012")
#endif
        .endm

        .macro  __BUILD_count exception
        LONG_L  t0, exception_count_\exception
        LONG_ADDIU t0, 1
        LONG_S  t0, exception_count_\exception
        .comm   exception_count_\exception, 8, 8
        .endm
        .macro  __BUILD_HANDLER exception handler clear verbose ext
        .align  5
        NESTED(handle_\exception, PT_SIZE, sp)
        .set    noat
        SAVE_ALL
        FEXPORT(handle_\exception\ext)
        __BUILD_clear_\clear
        .set    at
        __BUILD_\verbose \exception
        move    a0, sp
        PTR_LA  ra, ret_from_exception
        j       do_\handler
        END(handle_\exception)
        .endm

        .macro  BUILD_HANDLER exception handler clear verbose
        __BUILD_HANDLER \exception \handler \clear \verbose _int
        .endm
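        /*
         * Each invocation below builds handle_<exception>: it saves the
         * full register frame, runs the interrupt-state fixup selected by
         * the <clear> argument and tail-calls do_<handler>() with a0
         * pointing at the pt_regs; the trailing comment is the ExcCode.
         */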
        BUILD_HANDLER adel ade ade silent               /* #4  */
        BUILD_HANDLER ades ade ade silent               /* #5  */
        BUILD_HANDLER ibe be cli silent                 /* #6  */
        BUILD_HANDLER dbe be cli silent                 /* #7  */
        BUILD_HANDLER bp bp sti silent                  /* #9  */
        BUILD_HANDLER ri ri sti silent                  /* #10 */
        BUILD_HANDLER cpu cpu sti silent                /* #11 */
        BUILD_HANDLER ov ov sti silent                  /* #12 */
        BUILD_HANDLER tr tr sti silent                  /* #13 */
        BUILD_HANDLER fpe fpe fpe silent                /* #15 */
        BUILD_HANDLER mdmx mdmx sti silent              /* #22 */
#ifdef CONFIG_HARDWARE_WATCHPOINTS
        /*
         * For watch, interrupts will be enabled after the watch
         * registers are read.
         */
        BUILD_HANDLER watch watch cli silent            /* #23 */
#else
        BUILD_HANDLER watch watch sti verbose           /* #23 */
#endif
        BUILD_HANDLER mcheck mcheck cli verbose         /* #24 */
        BUILD_HANDLER mt mt sti silent                  /* #25 */
        BUILD_HANDLER dsp dsp sti silent                /* #26 */
        BUILD_HANDLER reserved reserved sti verbose     /* others */
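        /*
         * handle_ri_rdhwr_vivt below first probes the TLB for a mapping
         * of EPC; if there is none, the fast path's load of the faulting
         * instruction could itself fault, so it branches to the generic
         * handle_ri slow path instead.
         */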
        .align  5
        LEAF(handle_ri_rdhwr_vivt)
#ifdef CONFIG_MIPS_MT_SMTC
        PANIC_PIC("handle_ri_rdhwr_vivt called")
#else
        .set    push
        .set    noat
        .set    noreorder
        /* check if TLB contains an entry for EPC */
        MFC0    k1, CP0_ENTRYHI
        andi    k1, 0xff        /* ASID_MASK */
        MFC0    k0, CP0_EPC
        PTR_SRL k0, PAGE_SHIFT + 1
        PTR_SLL k0, PAGE_SHIFT + 1
        or      k1, k0
        MTC0    k1, CP0_ENTRYHI
        mtc0_tlbw_hazard
        tlbp
        tlb_probe_hazard
        mfc0    k1, CP0_INDEX
        .set    pop
        bltz    k1, handle_ri   /* slow path */
        /* fall thru */
#endif
        END(handle_ri_rdhwr_vivt)
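        /*
         * Fast path for emulating "rdhwr v1, $29": hardware register 29
         * (ULR) holds the userland TLS pointer, and CPUs that do not
         * implement RDHWR/ULR raise a Reserved Instruction exception for
         * it.  Fetch the instruction at EPC, check the encoding, load the
         * thread's TI_TP_VALUE into v1 and step EPC past the instruction.
         */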
        LEAF(handle_ri_rdhwr)
        .set    push
        .set    noat
        .set    noreorder
        /* 0x7c03e83b: rdhwr v1,$29 */
        MFC0    k1, CP0_EPC
        lui     k0, 0x7c03
        lw      k1, (k1)
        ori     k0, 0xe83b
        .set    reorder
        bne     k0, k1, handle_ri       /* if not ours */
        /* The insn is rdhwr.  No need to check CAUSE.BD here. */
        get_saved_sp    /* k1 := current_thread_info */
        .set    noreorder
        MFC0    k0, CP0_EPC
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
        ori     k1, _THREAD_MASK
        xori    k1, _THREAD_MASK
        LONG_L  v1, TI_TP_VALUE(k1)
        LONG_ADDIU k0, 4
        jr      k0
        rfe
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
        LONG_ADDIU k0, 4                /* stall on $k0 */
#else
        .set    at=v1
        LONG_ADDIU k0, 4
        .set    noat
#endif
        MTC0    k0, CP0_EPC
        /* I hope three instructions between MTC0 and ERET are enough... */
        ori     k1, _THREAD_MASK
        xori    k1, _THREAD_MASK
        LONG_L  v1, TI_TP_VALUE(k1)
        .set    mips3
        eret
        .set    mips0
#endif
        .set    pop
        END(handle_ri_rdhwr)
#ifdef CONFIG_64BIT
/* A temporary overflow handler used by check_daddi(). */

        __INIT

        BUILD_HANDLER daddi_ov daddi_ov none silent     /* #12 */
#endif