bmips_vec.S

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com)
 *
 * Reset/NMI/re-entry vectors for BMIPS processors
 */

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/addrspace.h>
#include <asm/hazards.h>
#include <asm/bmips.h>
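/*
 * Note: three superscalar no-ops serve as a simple CP0 hazard barrier;
 * this is assumed to give these cores enough settling time for a CP0
 * write to take effect before the next dependent instruction.
 */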
	.macro	BARRIER
	.set	mips32
	_ssnop
	_ssnop
	_ssnop
	.set	mips0
	.endm
/***********************************************************************
 * Alternate CPU1 startup vector for BMIPS4350
 *
 * On some systems the bootloader has already started CPU1 and configured
 * it to resume execution at 0x8000_0200 (!BEV IV vector) when it is
 * triggered by the SW1 interrupt. If that is the case we try to move
 * it to a more convenient place: BMIPS_WARM_RESTART_VEC @ 0x8000_0380.
 ***********************************************************************/

LEAF(bmips_smp_movevec)
	la	k0, 1f
	li	k1, CKSEG1
	or	k0, k1
	jr	k0

1:
	/* clear IV, pending IPIs */
	mtc0	zero, CP0_CAUSE

	/* re-enable IRQs to wait for SW1 */
	li	k0, ST0_IE | ST0_BEV | STATUSF_IP1
	mtc0	k0, CP0_STATUS

	/* set up CPU1 CBR; move BASE to 0xa000_0000 */
	li	k0, 0xff400000
	mtc0	k0, $22, 6

	/* set up relocation vector address based on thread ID */
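	/*
	 * CP0 $22 sel 3 is assumed to carry the CMT thread ID in bit 31;
	 * shifting it down to 0x8000 yields the per-thread offset between
	 * the two relocation vector control registers in the CBR block.
	 */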
	mfc0	k1, $22, 3
	srl	k1, 16
	andi	k1, 0x8000
	or	k1, CKSEG1 | BMIPS_RELO_VECTOR_CONTROL_0
	or	k0, k1
	li	k1, 0xa0080000
	sw	k1, 0(k0)

	/* wait here for SW1 interrupt from bmips_boot_secondary() */
	wait

	la	k0, bmips_reset_nmi_vec
	li	k1, CKSEG1
	or	k0, k1
	jr	k0
END(bmips_smp_movevec)
/***********************************************************************
 * Reset/NMI vector
 * For BMIPS processors that can relocate their exception vectors, this
 * entire function gets copied to 0x8000_0000.
 ***********************************************************************/

NESTED(bmips_reset_nmi_vec, PT_SIZE, sp)
	.set	push
	.set	noat
	.align	4

#ifdef CONFIG_SMP
	/* if the NMI bit is clear, assume this is a CPU1 reset instead */
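	/* Status bit 19 is the NMI flag on these cores */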
	li	k1, (1 << 19)
	mfc0	k0, CP0_STATUS
	and	k0, k1
	beqz	k0, soft_reset

#if defined(CONFIG_CPU_BMIPS5000)
	mfc0	k0, CP0_PRID
	li	k1, PRID_IMP_BMIPS5000

	/* mask with PRID_IMP_BMIPS5000 to cover both variants */
	andi	k0, PRID_IMP_BMIPS5000
	bne	k0, k1, 1f

	/* if we're not on core 0, this must be the SMP boot signal */
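	/*
	 * Bits 25-26 of the Broadcom config register are assumed to hold
	 * the core number; a non-zero value means this is not core 0.
	 */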
	li	k1, (3 << 25)
	mfc0	k0, $22
	and	k0, k1
	bnez	k0, bmips_smp_entry

1:
#endif /* CONFIG_CPU_BMIPS5000 */
#endif /* CONFIG_SMP */

	/* nope, it's just a regular NMI */
	SAVE_ALL
	move	a0, sp

	/* clear EXL, ERL, BEV so that TLB refills still work */
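	/* the or/xor pair with the same mask forces those bits to zero */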
	mfc0	k0, CP0_STATUS
	li	k1, ST0_ERL | ST0_EXL | ST0_BEV | ST0_IE
	or	k0, k1
	xor	k0, k1
	mtc0	k0, CP0_STATUS
	BARRIER

	/* jump to the NMI handler function */
	la	k0, nmi_handler
	jr	k0

	RESTORE_ALL
	.set	arch=r4000
	eret

#ifdef CONFIG_SMP
soft_reset:

#if defined(CONFIG_CPU_BMIPS5000)
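	/*
	 * Only the BMIPS5200 needs the special handling below: on TP0 of a
	 * non-boot core, a warm boot (soft reset) re-runs the per-core
	 * cache init before entering the normal SMP boot path.
	 */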
	mfc0	k0, CP0_PRID
	andi	k0, 0xff00
	li	k1, PRID_IMP_BMIPS5200
	bne	k0, k1, bmips_smp_entry

	/* if running on TP1, jump to bmips_smp_entry */
	mfc0	k0, $22
	li	k1, (1 << 24)
	and	k1, k0
	bnez	k1, bmips_smp_entry
	nop

	/*
	 * Running on TP0; this cannot be core 0 (the boot core).
	 * Check the soft reset flag, which indicates a warm boot.
	 */
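	/* CP0 $12 is Status; bit 20 is the SR (soft reset) flag */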
	mfc0	k0, $12
	li	k1, (1 << 20)
	and	k0, k1
	beqz	k0, bmips_smp_entry

	/*
	 * Warm boot.
	 * Cache init is only done on TP0
	 */
	la	k0, bmips_5xxx_init
	jalr	k0
	nop

	b	bmips_smp_entry
	nop
#endif

/***********************************************************************
 * CPU1 reset vector (used for the initial boot only)
 * This is still part of bmips_reset_nmi_vec().
 ***********************************************************************/

bmips_smp_entry:

	/* set up CP0 STATUS; enable FPU */
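	/* 0x30000000 sets CU0 and CU1 (coprocessor 0 and 1 usable) */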
	li	k0, 0x30000000
	mtc0	k0, CP0_STATUS
	BARRIER

	/* set local CP0 CONFIG to make kseg0 cacheable, write-back */
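	/* the ori/xori pair leaves K0 = 3 (cacheable) in Config[2:0] */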
	mfc0	k0, CP0_CONFIG
	ori	k0, 0x07
	xori	k0, 0x04
	mtc0	k0, CP0_CONFIG

	mfc0	k0, CP0_PRID
	andi	k0, 0xff00
#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
	li	k1, PRID_IMP_BMIPS43XX
	bne	k0, k1, 2f

	/* initialize CPU1's local I-cache */
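	/*
	 * CP0 $28 (TagLo, selects 0 and 1) is cleared so Index_Store_Tag_I
	 * writes zero tags; the loop below is assumed to cover a 64 KB
	 * I-cache in 16-byte lines.
	 */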
	li	k0, 0x80000000
	li	k1, 0x80010000
	mtc0	zero, $28
	mtc0	zero, $28, 1
	BARRIER

1:	cache	Index_Store_Tag_I, 0(k0)
	addiu	k0, 16
	bne	k0, k1, 1b

	b	3f
2:
#endif /* CONFIG_CPU_BMIPS4350 || CONFIG_CPU_BMIPS4380 */

#if defined(CONFIG_CPU_BMIPS5000)
	/* mask with PRID_IMP_BMIPS5000 to cover both variants */
	li	k1, PRID_IMP_BMIPS5000
	andi	k0, PRID_IMP_BMIPS5000
	bne	k0, k1, 3f

	/* set exception vector base */
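	/* CP0 $15 sel 1 is EBase on the BMIPS5000 */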
	la	k0, ebase
	lw	k0, 0(k0)
	mtc0	k0, $15, 1
	BARRIER
#endif /* CONFIG_CPU_BMIPS5000 */

3:
	/* jump back to kseg0 in case we need to remap the kseg1 area */
	la	k0, 1f
	jr	k0

1:
	la	k0, bmips_enable_xks01
	jalr	k0

	/* use temporary stack to set up upper memory TLB */
	li	sp, BMIPS_WARM_RESTART_VEC
	la	k0, plat_wired_tlb_setup
	jalr	k0

	/* switch to permanent stack and continue booting */

	.global	bmips_secondary_reentry
bmips_secondary_reentry:
	la	k0, bmips_smp_boot_sp
	lw	sp, 0(k0)
	la	k0, bmips_smp_boot_gp
	lw	gp, 0(k0)
	la	k0, start_secondary
	jr	k0

#endif /* CONFIG_SMP */

	.align	4
	.global	bmips_reset_nmi_vec_end
bmips_reset_nmi_vec_end:
END(bmips_reset_nmi_vec)

	.set	pop

/***********************************************************************
 * CPU1 warm restart vector (used for second and subsequent boots).
 * Also used for S2 standby recovery (PM).
 * This entire function gets copied to (BMIPS_WARM_RESTART_VEC)
 ***********************************************************************/

LEAF(bmips_smp_int_vec)
	.align	4
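	/* the ori/xori pair forces Status.IE (bit 0) to zero before the eret */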
	mfc0	k0, CP0_STATUS
	ori	k0, 0x01
	xori	k0, 0x01
	mtc0	k0, CP0_STATUS
	eret

	.align	4
	.global	bmips_smp_int_vec_end
bmips_smp_int_vec_end:
END(bmips_smp_int_vec)

/***********************************************************************
 * XKS01 support
 * Certain CPUs support extending kseg0 to 1024 MB.
 ***********************************************************************/

LEAF(bmips_enable_xks01)
#if defined(CONFIG_XKS01)
	mfc0	t0, CP0_PRID
	andi	t2, t0, 0xff00

#if defined(CONFIG_CPU_BMIPS4380)
	li	t1, PRID_IMP_BMIPS43XX
	bne	t2, t1, 1f
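	/*
	 * only BMIPS4380 revisions from PRID_REV_BMIPS4380_LO through
	 * PRID_REV_BMIPS4380_HI get XKS01 enabled
	 */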
	andi	t0, 0xff
	addiu	t1, t0, -PRID_REV_BMIPS4380_HI
	bgtz	t1, 2f

	addiu	t0, -PRID_REV_BMIPS4380_LO
	bltz	t0, 2f
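	/*
	 * Clear the field masked by t1 (the or/xor pair), then set the two
	 * bits in t2, which are assumed to be the XKS01 enable bits for
	 * this core.
	 */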
	mfc0	t0, $22, 3
	li	t1, 0x1ff0
	li	t2, (1 << 12) | (1 << 9)
	or	t0, t1
	xor	t0, t1
	or	t0, t2
	mtc0	t0, $22, 3
	BARRIER
	b	2f

1:
#endif /* CONFIG_CPU_BMIPS4380 */

#if defined(CONFIG_CPU_BMIPS5000)
	li	t1, PRID_IMP_BMIPS5000

	/* mask with PRID_IMP_BMIPS5000 to cover both variants */
	andi	t2, PRID_IMP_BMIPS5000
	bne	t2, t1, 2f

	mfc0	t0, $22, 5
	li	t1, 0x01ff
	li	t2, (1 << 8) | (1 << 5)
	or	t0, t1
	xor	t0, t1
	or	t0, t2
	mtc0	t0, $22, 5
	BARRIER
#endif /* CONFIG_CPU_BMIPS5000 */

2:
#endif /* defined(CONFIG_XKS01) */

	jr	ra
END(bmips_enable_xks01)