
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 */
#ifndef _ASM_STACKFRAME_H
#define _ASM_STACKFRAME_H

#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define STATMASK 0x3f
#else
#define STATMASK 0x1f
#endif
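/*
 * STATMASK covers the mode/interrupt bits in the low part of the
 * Status register that the CLI/STI/KMODE macros below manipulate:
 * the three KU/IE pairs (bits 5..0) on R3000-class CPUs, and
 * KSU/ERL/EXL/IE (bits 4..0) on later CPUs.
 */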

        .macro  SAVE_AT
        .set    push
        .set    noat
        LONG_S  $1, PT_R1(sp)
        .set    pop
        .endm

        .macro  SAVE_TEMP
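        /*
         * HI/LO are read early and stored in between the GPR saves
         * below, which helps hide the mfhi/mflo latency on older
         * cores. MIPS R6 dropped the HI/LO registers (multiplies
         * write GPRs directly), hence the !CONFIG_CPU_MIPSR6 guards.
         */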
#ifdef CONFIG_CPU_HAS_SMARTMIPS
        mflhxu  v1
        LONG_S  v1, PT_LO(sp)
        mflhxu  v1
        LONG_S  v1, PT_HI(sp)
        mflhxu  v1
        LONG_S  v1, PT_ACX(sp)
#elif !defined(CONFIG_CPU_MIPSR6)
        mfhi    v1
#endif
#ifdef CONFIG_32BIT
        LONG_S  $8, PT_R8(sp)
        LONG_S  $9, PT_R9(sp)
#endif
        LONG_S  $10, PT_R10(sp)
        LONG_S  $11, PT_R11(sp)
        LONG_S  $12, PT_R12(sp)
#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
        LONG_S  v1, PT_HI(sp)
        mflo    v1
#endif
        LONG_S  $13, PT_R13(sp)
        LONG_S  $14, PT_R14(sp)
        LONG_S  $15, PT_R15(sp)
        LONG_S  $24, PT_R24(sp)
#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
        LONG_S  v1, PT_LO(sp)
#endif
#ifdef CONFIG_CPU_CAVIUM_OCTEON
        /*
         * The Octeon multiplier state is affected by general
         * multiply instructions. It must be saved before kernel
         * code might corrupt it.
         */
        jal     octeon_mult_save
#endif
        .endm

        .macro  SAVE_STATIC
        LONG_S  $16, PT_R16(sp)
        LONG_S  $17, PT_R17(sp)
        LONG_S  $18, PT_R18(sp)
        LONG_S  $19, PT_R19(sp)
        LONG_S  $20, PT_R20(sp)
        LONG_S  $21, PT_R21(sp)
        LONG_S  $22, PT_R22(sp)
        LONG_S  $23, PT_R23(sp)
        LONG_S  $30, PT_R30(sp)
        .endm

#ifdef CONFIG_SMP
        .macro  get_saved_sp    /* SMP variation */
        ASM_CPUID_MFC0  k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
        lui     k1, %hi(kernelsp)
#else
        lui     k1, %highest(kernelsp)
        daddiu  k1, %higher(kernelsp)
        dsll    k1, 16
        daddiu  k1, %hi(kernelsp)
        dsll    k1, 16
#endif
        LONG_SRL        k0, SMP_CPUID_PTRSHIFT
        LONG_ADDU       k1, k0
        LONG_L  k1, %lo(kernelsp)(k1)
        .endm
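
/*
 * Sketch of the 64-bit address build above, with an illustrative value
 * chosen so no carries occur between the 16-bit pieces (the assembler's
 * %highest/%higher/%hi relocations adjust for carries automatically).
 * For &kernelsp == 0xa800000000123450:
 *
 *      lui     k1, %highest(..)        # k1 = 0xffffffffa8000000
 *      daddiu  k1, %higher(..)         # k1 = 0xffffffffa8000000
 *      dsll    k1, 16                  # k1 = 0xffffa80000000000
 *      daddiu  k1, %hi(..)             # k1 = 0xffffa80000000012
 *      dsll    k1, 16                  # k1 = 0xa800000000120000
 *      LONG_L  k1, %lo(..)(k1)         # loads from 0xa800000000123450
 */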

        .macro  set_saved_sp stackp temp temp2
        ASM_CPUID_MFC0  \temp, ASM_SMP_CPUID_REG
        LONG_SRL        \temp, SMP_CPUID_PTRSHIFT
        LONG_S  \stackp, kernelsp(\temp)
        .endm
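
/*
 * kernelsp holds each CPU's saved kernel stack pointer. In the SMP
 * macros above, the CPU identifier read via ASM_CPUID_MFC0 sits in
 * the upper bits of the chosen CP0 register; shifting it right by
 * SMP_CPUID_PTRSHIFT turns it, in effect, into a pointer-scaled
 * byte offset into the kernelsp array.
 */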
#else /* !CONFIG_SMP */
        .macro  get_saved_sp    /* Uniprocessor variation */
#ifdef CONFIG_CPU_JUMP_WORKAROUNDS
        /*
         * Clear the BTB (branch target buffer) and forbid the RAS
         * (return address stack) via the diagnostic register, to
         * work around the out-of-order issue in the Loongson2F.
         */
        move    k0, ra
        jal     1f
        nop
1:      jal     1f
        nop
1:      jal     1f
        nop
1:      jal     1f
        nop
1:      move    ra, k0
        li      k0, 3
        mtc0    k0, $22
#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
        lui     k1, %hi(kernelsp)
#else
        lui     k1, %highest(kernelsp)
        daddiu  k1, %higher(kernelsp)
        dsll    k1, k1, 16
        daddiu  k1, %hi(kernelsp)
        dsll    k1, k1, 16
#endif
        LONG_L  k1, %lo(kernelsp)(k1)
        .endm
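
/*
 * temp and temp2 are unused in this uniprocessor variant; the
 * arguments are kept so the signature matches the SMP version and
 * callers can be identical.
 */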
        .macro  set_saved_sp stackp temp temp2
        LONG_S  \stackp, kernelsp
        .endm
#endif

        .macro  SAVE_SOME
        .set    push
        .set    noat
        .set    reorder
        mfc0    k0, CP0_STATUS
        sll     k0, 3   /* extract cu0 bit: Status.CU0 (bit 28) moves to the sign bit */
        .set    noreorder
        bltz    k0, 8f
        move    k1, sp
#ifdef CONFIG_EVA
        /*
         * Flush interAptiv's Return Prediction Stack (RPS) by writing
         * EntryHi. Toggling Config7.RPS is slower and less portable.
         *
         * The RPS isn't automatically flushed when exceptions are
         * taken, which can result in kernel mode speculative accesses
         * to user addresses if the RPS mispredicts. That's harmless
         * when user and kernel share the same address space, but with
         * EVA the same user segments may be unmapped to kernel mode,
         * even containing sensitive MMIO regions or invalid memory.
         *
         * This can happen when the kernel sets the return address to
         * ret_from_* and jr's to the exception handler, which looks
         * more like a tail call than a function call. If nested calls
         * don't evict the last user address in the RPS, it will
         * mispredict the return and fetch from a user controlled
         * address into the icache.
         *
         * More recent EVA-capable cores with MAAR to restrict
         * speculative accesses aren't affected.
         */
        MFC0    k0, CP0_ENTRYHI
        MTC0    k0, CP0_ENTRYHI
#endif
        .set    reorder
        /* Called from user mode, new stack. */
        get_saved_sp
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
8:      move    k0, sp
        PTR_SUBU sp, k1, PT_SIZE
#else
        .set    at=k0
8:      PTR_SUBU k1, PT_SIZE
        .set    noat
        move    k0, sp
        move    sp, k1
#endif
        LONG_S  k0, PT_R29(sp)
        LONG_S  $3, PT_R3(sp)
        /*
         * You might think that you don't need to save $0, but the
         * FPU emulator and gdb remote debug stub need it to operate
         * correctly.
         */
        LONG_S  $0, PT_R0(sp)
        mfc0    v1, CP0_STATUS
        LONG_S  $2, PT_R2(sp)
        LONG_S  v1, PT_STATUS(sp)
        LONG_S  $4, PT_R4(sp)
        mfc0    v1, CP0_CAUSE
        LONG_S  $5, PT_R5(sp)
        LONG_S  v1, PT_CAUSE(sp)
        LONG_S  $6, PT_R6(sp)
        MFC0    v1, CP0_EPC
        LONG_S  $7, PT_R7(sp)
#ifdef CONFIG_64BIT
        LONG_S  $8, PT_R8(sp)
        LONG_S  $9, PT_R9(sp)
#endif
        LONG_S  v1, PT_EPC(sp)
        LONG_S  $25, PT_R25(sp)
        LONG_S  $28, PT_R28(sp)
        LONG_S  $31, PT_R31(sp)
        /* Set thread_info ($28) if we're coming from user mode */
        mfc0    k0, CP0_STATUS
        sll     k0, 3   /* extract cu0 bit, as above */
        bltz    k0, 9f
        ori     $28, sp, _THREAD_MASK
        xori    $28, _THREAD_MASK
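        /*
         * The ori/xori pair computes sp & ~_THREAD_MASK without
         * needing a spare register for the mask. E.g. with 8KiB
         * stacks (_THREAD_MASK == 0x1fff), ori sets the low 13 bits
         * and xori clears them again, rounding sp down to the base
         * of the stack where struct thread_info lives.
         */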
#ifdef CONFIG_CPU_CAVIUM_OCTEON
        .set    mips64
        pref    0, 0($28)       /* Prefetch the current pointer */
#endif
9:
        .set    pop
        .endm

        .macro  SAVE_ALL
        SAVE_SOME
        SAVE_AT
        SAVE_TEMP
        SAVE_STATIC
        .endm
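
/*
 * Usage sketch (illustrative only, not part of this header): a
 * hypothetical exception handler built from these macros might
 * look like
 *
 *      NESTED(handle_foo, PT_SIZE, sp)
 *              SAVE_ALL
 *              CLI
 *              move    a0, sp          # struct pt_regs * argument
 *              jal     do_foo
 *              RESTORE_ALL_AND_RET
 *              END(handle_foo)
 *
 * where handle_foo/do_foo are made-up names; real handlers live in
 * arch/mips/kernel/genex.S and friends.
 */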

        .macro  RESTORE_AT
        .set    push
        .set    noat
        LONG_L  $1, PT_R1(sp)
        .set    pop
        .endm

        .macro  RESTORE_TEMP
#ifdef CONFIG_CPU_CAVIUM_OCTEON
        /* Restore the Octeon multiplier state */
        jal     octeon_mult_restore
#endif
#ifdef CONFIG_CPU_HAS_SMARTMIPS
        LONG_L  $24, PT_ACX(sp)
        mtlhx   $24
        LONG_L  $24, PT_HI(sp)
        mtlhx   $24
        LONG_L  $24, PT_LO(sp)
        mtlhx   $24
#elif !defined(CONFIG_CPU_MIPSR6)
        LONG_L  $24, PT_LO(sp)
        mtlo    $24
        LONG_L  $24, PT_HI(sp)
        mthi    $24
#endif
#ifdef CONFIG_32BIT
        LONG_L  $8, PT_R8(sp)
        LONG_L  $9, PT_R9(sp)
#endif
        LONG_L  $10, PT_R10(sp)
        LONG_L  $11, PT_R11(sp)
        LONG_L  $12, PT_R12(sp)
        LONG_L  $13, PT_R13(sp)
        LONG_L  $14, PT_R14(sp)
        LONG_L  $15, PT_R15(sp)
        LONG_L  $24, PT_R24(sp)
        .endm

        .macro  RESTORE_STATIC
        LONG_L  $16, PT_R16(sp)
        LONG_L  $17, PT_R17(sp)
        LONG_L  $18, PT_R18(sp)
        LONG_L  $19, PT_R19(sp)
        LONG_L  $20, PT_R20(sp)
        LONG_L  $21, PT_R21(sp)
        LONG_L  $22, PT_R22(sp)
        LONG_L  $23, PT_R23(sp)
        LONG_L  $30, PT_R30(sp)
        .endm

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
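/*
 * Both RESTORE_SOME variants below merge the Status value saved in
 * pt_regs with the live one: the STATMASK mode/interrupt bits are
 * restored from PT_STATUS(sp), while the coprocessor enables and
 * interrupt masks (ST0_CU1/ST0_FR/ST0_IM as applicable) are kept
 * from the current register.
 */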
        .macro  RESTORE_SOME
        .set    push
        .set    reorder
        .set    noat
        mfc0    a0, CP0_STATUS
        li      v1, ST0_CU1 | ST0_IM
        ori     a0, STATMASK
        xori    a0, STATMASK
        mtc0    a0, CP0_STATUS
        and     a0, v1
        LONG_L  v0, PT_STATUS(sp)
        nor     v1, $0, v1
        and     v0, v1
        or      v0, a0
        mtc0    v0, CP0_STATUS
        LONG_L  $31, PT_R31(sp)
        LONG_L  $28, PT_R28(sp)
        LONG_L  $25, PT_R25(sp)
        LONG_L  $7, PT_R7(sp)
        LONG_L  $6, PT_R6(sp)
        LONG_L  $5, PT_R5(sp)
        LONG_L  $4, PT_R4(sp)
        LONG_L  $3, PT_R3(sp)
        LONG_L  $2, PT_R2(sp)
        .set    pop
        .endm

        .macro  RESTORE_SP_AND_RET
        .set    push
        .set    noreorder
        LONG_L  k0, PT_EPC(sp)
        LONG_L  sp, PT_R29(sp)
        jr      k0
        rfe
        .set    pop
        .endm
#else
        .macro  RESTORE_SOME
        .set    push
        .set    reorder
        .set    noat
        mfc0    a0, CP0_STATUS
        ori     a0, STATMASK
        xori    a0, STATMASK
        mtc0    a0, CP0_STATUS
        li      v1, ST0_CU1 | ST0_FR | ST0_IM
        and     a0, v1
        LONG_L  v0, PT_STATUS(sp)
        nor     v1, $0, v1
        and     v0, v1
        or      v0, a0
        mtc0    v0, CP0_STATUS
        LONG_L  v1, PT_EPC(sp)
        MTC0    v1, CP0_EPC
        LONG_L  $31, PT_R31(sp)
        LONG_L  $28, PT_R28(sp)
        LONG_L  $25, PT_R25(sp)
#ifdef CONFIG_64BIT
        LONG_L  $8, PT_R8(sp)
        LONG_L  $9, PT_R9(sp)
#endif
        LONG_L  $7, PT_R7(sp)
        LONG_L  $6, PT_R6(sp)
        LONG_L  $5, PT_R5(sp)
        LONG_L  $4, PT_R4(sp)
        LONG_L  $3, PT_R3(sp)
        LONG_L  $2, PT_R2(sp)
        .set    pop
        .endm

        .macro  RESTORE_SP_AND_RET
        LONG_L  sp, PT_R29(sp)
        .set    arch=r4000
        eret
        .set    mips0
        .endm
#endif

        .macro  RESTORE_SP
        LONG_L  sp, PT_R29(sp)
        .endm

        .macro  RESTORE_ALL
        RESTORE_TEMP
        RESTORE_STATIC
        RESTORE_AT
        RESTORE_SOME
        RESTORE_SP
        .endm

        .macro  RESTORE_ALL_AND_RET
        RESTORE_TEMP
        RESTORE_STATIC
        RESTORE_AT
        RESTORE_SOME
        RESTORE_SP_AND_RET
        .endm

/*
 * Move to kernel mode and disable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel
 * stack.
 */
        .macro  CLI
        mfc0    t0, CP0_STATUS
        li      t1, ST0_CU0 | STATMASK
        or      t0, t1
        xori    t0, STATMASK
        mtc0    t0, CP0_STATUS
        irq_disable_hazard
        .endm
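
/*
 * Worked example of the or/xori trick in CLI, for a non-R3000 CPU
 * (STATMASK == 0x1f): or sets CU0 and all of KSU/ERL/EXL/IE, then
 * xori 0x1f clears those low five bits again. The net effect is
 * Status = (Status | ST0_CU0) & ~0x1f: kernel mode, exception and
 * error levels cleared, interrupts disabled, CU0 set.
 */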

/*
 * Move to kernel mode and enable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel
 * stack.
 */
        .macro  STI
        mfc0    t0, CP0_STATUS
        li      t1, ST0_CU0 | STATMASK
        or      t0, t1
        xori    t0, STATMASK & ~1
        mtc0    t0, CP0_STATUS
        irq_enable_hazard
        .endm
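
/*
 * STI differs from CLI only in the xori mask: STATMASK & ~1 leaves
 * bit 0 alone, so the IE (interrupt enable) bit set by the preceding
 * or stays set and interrupts end up enabled. (On R3000-class CPUs
 * bit 0 is IEc, with the same effect.)
 */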

/*
 * Just move to kernel mode and leave interrupts as they are. Note
 * that for the R3000 this means copying the previous interrupt enable
 * bit from IEp.
 * Set the cp0 enable bit as a sign that we're running on the kernel
 * stack.
 */
        .macro  KMODE
        mfc0    t0, CP0_STATUS
        li      t1, ST0_CU0 | (STATMASK & ~1)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
        andi    t2, t0, ST0_IEP
        srl     t2, 2
        or      t0, t2
#endif
        or      t0, t1
        xori    t0, STATMASK & ~1
        mtc0    t0, CP0_STATUS
        irq_disable_hazard
        .endm
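
/*
 * In the R3000 path above, ST0_IEP is bit 2 (the "previous" interrupt
 * enable saved by the exception); shifting it right by 2 moves it into
 * bit 0 (IEc), so the pre-exception interrupt state is carried into
 * the new current mode while the remaining mode bits are cleared by
 * the or/xori pair.
 */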

#endif /* _ASM_STACKFRAME_H */