/* tlb-miss.S: TLB miss handlers
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/spr-regs.h>

        .section .text..tlbmiss
        .balign 4

        .globl  __entry_insn_mmu_miss
__entry_insn_mmu_miss:
        break
        nop

        .globl  __entry_insn_mmu_exception
__entry_insn_mmu_exception:
        break
        nop

        .globl  __entry_data_mmu_miss
__entry_data_mmu_miss:
        break
        nop

        .globl  __entry_data_mmu_exception
__entry_data_mmu_exception:
        break
        nop
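
# Note: the four entry points above simply trap with `break'; the working
# TLB miss handlers follow below (presumably these stubs exist so that the
# corresponding MMU miss/exception vectors land somewhere well-defined).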

###############################################################################
#
# handle a lookup failure of one sort or another in a kernel TLB handler
# On entry:
#   GR29 - faulting address
#   SCR2 - saved CCR
#
###############################################################################
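#
# In outline (as read from the code below): if PCSR shows the miss trapped at
# __break_tlb_miss_return_break, take the path that re-enables single-stepping;
# either way, restore CCR from SCR2, stash the faulting address (EAR0) back in
# SCR2, reload GR29 with the kernel's current task pointer and branch to the
# appropriate __entry_kernel_handle_mmu_fault entry point.
#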
        .type   __tlb_kernel_fault,@function
__tlb_kernel_fault:
        # see if we're supposed to re-enable single-step mode upon return
        sethi.p %hi(__break_tlb_miss_return_break),gr30
        setlo   %lo(__break_tlb_miss_return_break),gr30
        movsg   pcsr,gr31
        subcc   gr31,gr30,gr0,icc0
        beq     icc0,#0,__tlb_kernel_fault_sstep

        movsg   scr2,gr30
        movgs   gr30,ccr
        movgs   gr29,scr2    /* save EAR0 value */
        sethi.p %hi(__kernel_current_task),gr29
        setlo   %lo(__kernel_current_task),gr29
        ldi.p   @(gr29,#0),gr29    /* restore GR29 */
        bra     __entry_kernel_handle_mmu_fault

        # we've got to re-enable single-stepping
__tlb_kernel_fault_sstep:
        sethi.p %hi(__break_tlb_miss_real_return_info),gr30
        setlo   %lo(__break_tlb_miss_real_return_info),gr30
        lddi    @(gr30,0),gr30
        movgs   gr30,pcsr
        movgs   gr31,psr
        movsg   scr2,gr30
        movgs   gr30,ccr
        movgs   gr29,scr2    /* save EAR0 value */
        sethi.p %hi(__kernel_current_task),gr29
        setlo   %lo(__kernel_current_task),gr29
        ldi.p   @(gr29,#0),gr29    /* restore GR29 */
        bra     __entry_kernel_handle_mmu_fault_sstep

        .size   __tlb_kernel_fault, .-__tlb_kernel_fault

###############################################################################
#
# handle a lookup failure of one sort or another in a user TLB handler
# On entry:
#   GR28 - faulting address
#   SCR2 - saved CCR
#
###############################################################################
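#
# In outline (as read from the code below): the same PCSR check as the kernel
# variant decides whether single-stepping must be re-enabled; then CCR is
# restored from SCR2 and control branches to the appropriate
# __entry_uspace_handle_mmu_fault entry point (GR28 still holds the faulting
# address).
#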
        .type   __tlb_user_fault,@function
__tlb_user_fault:
        # see if we're supposed to re-enable single-step mode upon return
        sethi.p %hi(__break_tlb_miss_return_break),gr30
        setlo   %lo(__break_tlb_miss_return_break),gr30
        movsg   pcsr,gr31
        subcc   gr31,gr30,gr0,icc0
        beq     icc0,#0,__tlb_user_fault_sstep

        movsg   scr2,gr30
        movgs   gr30,ccr
        bra     __entry_uspace_handle_mmu_fault

        # we've got to re-enable single-stepping
__tlb_user_fault_sstep:
        sethi.p %hi(__break_tlb_miss_real_return_info),gr30
        setlo   %lo(__break_tlb_miss_real_return_info),gr30
        lddi    @(gr30,0),gr30
        movgs   gr30,pcsr
        movgs   gr31,psr
        movsg   scr2,gr30
        movgs   gr30,ccr
        bra     __entry_uspace_handle_mmu_fault_sstep

        .size   __tlb_user_fault, .-__tlb_user_fault

###############################################################################
#
# Kernel instruction TLB miss handler
# On entry:
#   GR1   - kernel stack pointer
#   GR28  - saved exception frame pointer
#   GR29  - faulting address
#   GR31  - EAR0 ^ SCR0
#   SCR0  - base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR4 - mapped page table as matched by SCR0
#
###############################################################################
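#
# Rough flow, as read from the code below:
#  1. if GR31 (EAR0 ^ SCR0) shows the fault lies outside the PGE cached via
#     DAMR4, walk the page directory through DAMR3 and remap DAMR4/SCR0
#  2. index the mapped page table with EAR0[25:14], check _PAGE_PRESENT and
#     set _PAGE_ACCESSED in the PTE
#  3. if IAMR1 currently holds a valid entry, punt it into the TLB proper
#  4. load the new translation into IAMR1/DAMR1 and return with rett
#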
        .globl  __entry_kernel_insn_tlb_miss
        .type   __entry_kernel_insn_tlb_miss,@function
__entry_kernel_insn_tlb_miss:
#if 0
        sethi.p %hi(0xe1200004),gr30
        setlo   %lo(0xe1200004),gr30
        st      gr0,@(gr30,gr0)
        sethi.p %hi(0xffc00100),gr30
        setlo   %lo(0xffc00100),gr30
        sth     gr30,@(gr30,gr0)
        membar
#endif
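
        # (the #if 0 block above is debug code, compiled out; it appears to
        # write to board-specific addresses to flag entry into the handler)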

        movsg   ccr,gr30    /* save CCR */
        movgs   gr30,scr2

        # see if the cached page table mapping is appropriate
        srlicc.p gr31,#26,gr0,icc0
        setlos  0x3ffc,gr30
        srli.p  gr29,#12,gr31    /* use EAR0[25:14] as PTE index */
        bne     icc0,#0,__itlb_k_PTD_miss

__itlb_k_PTD_mapped:
        # access the PTD with EAR0[25:14]
        # - DAMLR4 points to the virtual address of the appropriate page table
        # - the PTD holds 4096 PTEs
        # - the PTD must be accessed uncached
        # - the PTE must be marked accessed if it was valid
        #
        and     gr31,gr30,gr31
        movsg   damlr4,gr30
        add     gr30,gr31,gr31
        ldi     @(gr31,#0),gr30    /* fetch the PTE */
        andicc  gr30,#_PAGE_PRESENT,gr0,icc0
        ori.p   gr30,#_PAGE_ACCESSED,gr30
        beq     icc0,#0,__tlb_kernel_fault    /* jump if PTE invalid */
        sti.p   gr30,@(gr31,#0)    /* update the PTE */
        andi    gr30,#~_PAGE_ACCESSED,gr30

        # we're using IAMR1 as an extra TLB entry
        # - punt the entry here (if valid) to the real TLB and then replace with the new PTE
        # - need to check DAMR1 lest we cause a multiple-DAT-hit exception
        # - IAMPR1 has no WP bit, and we mustn't lose WP information
        movsg   iampr1,gr31
        andicc  gr31,#xAMPRx_V,gr0,icc0
        setlos.p 0xfffff000,gr31
        beq     icc0,#0,__itlb_k_nopunt    /* punt not required */

        movsg   iamlr1,gr31
        movgs   gr31,tplr    /* set TPLR.CXN */
        tlbpr   gr31,gr0,#4,#0    /* delete matches from TLB, IAMR1, DAMR1 */

        movsg   dampr1,gr31
        ori     gr31,#xAMPRx_V,gr31    /* entry was invalidated by tlbpr #4 */
        movgs   gr31,tppr
        movsg   iamlr1,gr31    /* set TPLR.CXN */
        movgs   gr31,tplr
        tlbpr   gr31,gr0,#2,#0    /* save to the TLB */

        movsg   tpxr,gr31    /* check the TLB write error flag */
        andicc.p gr31,#TPXR_E,gr0,icc0
        setlos  #0xfffff000,gr31
        bne     icc0,#0,__tlb_kernel_fault

__itlb_k_nopunt:

        # assemble the new TLB entry
        and     gr29,gr31,gr29
        movsg   cxnr,gr31
        or      gr29,gr31,gr29
        movgs   gr29,iamlr1    /* xAMLR = address | context number */
        movgs   gr30,iampr1
        movgs   gr29,damlr1
        movgs   gr30,dampr1

        # return, restoring registers
        movsg   scr2,gr30
        movgs   gr30,ccr
        sethi.p %hi(__kernel_current_task),gr29
        setlo   %lo(__kernel_current_task),gr29
        ldi     @(gr29,#0),gr29
        rett    #0
        beq     icc0,#3,0    /* prevent icache prefetch */

        # the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
        # appropriate page table and map that instead
        # - access the PGD with EAR0[31:26]
        # - DAMLR3 points to the virtual address of the page directory
        # - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__itlb_k_PTD_miss:
        srli    gr29,#26,gr31    /* calculate PGE offset */
        slli    gr31,#8,gr31    /* and clear bottom bits */

        movsg   damlr3,gr30
        ld      @(gr31,gr30),gr30    /* access the PGE */

        andicc.p gr30,#_PAGE_PRESENT,gr0,icc0
        andicc  gr30,#xAMPRx_SS,gr0,icc1

        # map this PTD instead and record coverage address
        ori.p   gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
        beq     icc0,#0,__tlb_kernel_fault    /* jump if PGE not present */
        slli.p  gr31,#18,gr31
        bne     icc1,#0,__itlb_k_bigpage
        movgs   gr30,dampr4
        movgs   gr31,scr0

        # we can now resume normal service
        setlos  0x3ffc,gr30
        srli.p  gr29,#12,gr31    /* use EAR0[25:14] as PTE index */
        bra     __itlb_k_PTD_mapped
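
        # a PGE with any of the xAMPRx_SS size bits set denotes a big page;
        # that case is not handled here and simply traps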

__itlb_k_bigpage:
        break
        nop

        .size   __entry_kernel_insn_tlb_miss, .-__entry_kernel_insn_tlb_miss

###############################################################################
#
# Kernel data TLB miss handler
# On entry:
#   GR1   - kernel stack pointer
#   GR28  - saved exception frame pointer
#   GR29  - faulting address
#   GR31  - EAR0 ^ SCR1
#   SCR1  - base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR5 - mapped page table as matched by SCR1
#
###############################################################################
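#
# This is structurally the same as the instruction-side handler above, but it
# keys the cached page-table mapping on SCR1/DAMR5 and checks DAMPR1 (rather
# than IAMPR1) for a valid entry to punt into the TLB.
#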
        .globl  __entry_kernel_data_tlb_miss
        .type   __entry_kernel_data_tlb_miss,@function
__entry_kernel_data_tlb_miss:
#if 0
        sethi.p %hi(0xe1200004),gr30
        setlo   %lo(0xe1200004),gr30
        st      gr0,@(gr30,gr0)
        sethi.p %hi(0xffc00100),gr30
        setlo   %lo(0xffc00100),gr30
        sth     gr30,@(gr30,gr0)
        membar
#endif

        movsg   ccr,gr30    /* save CCR */
        movgs   gr30,scr2

        # see if the cached page table mapping is appropriate
        srlicc.p gr31,#26,gr0,icc0
        setlos  0x3ffc,gr30
        srli.p  gr29,#12,gr31    /* use EAR0[25:14] as PTE index */
        bne     icc0,#0,__dtlb_k_PTD_miss

__dtlb_k_PTD_mapped:
        # access the PTD with EAR0[25:14]
        # - DAMLR5 points to the virtual address of the appropriate page table
        # - the PTD holds 4096 PTEs
        # - the PTD must be accessed uncached
        # - the PTE must be marked accessed if it was valid
        #
        and     gr31,gr30,gr31
        movsg   damlr5,gr30
        add     gr30,gr31,gr31
        ldi     @(gr31,#0),gr30    /* fetch the PTE */
        andicc  gr30,#_PAGE_PRESENT,gr0,icc0
        ori.p   gr30,#_PAGE_ACCESSED,gr30
        beq     icc0,#0,__tlb_kernel_fault    /* jump if PTE invalid */
        sti.p   gr30,@(gr31,#0)    /* update the PTE */
        andi    gr30,#~_PAGE_ACCESSED,gr30

        # we're using DAMR1 as an extra TLB entry
        # - punt the entry here (if valid) to the real TLB and then replace with the new PTE
        # - need to check IAMR1 lest we cause a multiple-DAT-hit exception
        movsg   dampr1,gr31
        andicc  gr31,#xAMPRx_V,gr0,icc0
        setlos.p 0xfffff000,gr31
        beq     icc0,#0,__dtlb_k_nopunt    /* punt not required */

        movsg   damlr1,gr31
        movgs   gr31,tplr    /* set TPLR.CXN */
        tlbpr   gr31,gr0,#4,#0    /* delete matches from TLB, IAMR1, DAMR1 */

        movsg   dampr1,gr31
        ori     gr31,#xAMPRx_V,gr31    /* entry was invalidated by tlbpr #4 */
        movgs   gr31,tppr
        movsg   damlr1,gr31    /* set TPLR.CXN */
        movgs   gr31,tplr
        tlbpr   gr31,gr0,#2,#0    /* save to the TLB */

        movsg   tpxr,gr31    /* check the TLB write error flag */
        andicc.p gr31,#TPXR_E,gr0,icc0
        setlos  #0xfffff000,gr31
        bne     icc0,#0,__tlb_kernel_fault

__dtlb_k_nopunt:

        # assemble the new TLB entry
        and     gr29,gr31,gr29
        movsg   cxnr,gr31
        or      gr29,gr31,gr29
        movgs   gr29,iamlr1    /* xAMLR = address | context number */
        movgs   gr30,iampr1
        movgs   gr29,damlr1
        movgs   gr30,dampr1

        # return, restoring registers
        movsg   scr2,gr30
        movgs   gr30,ccr
        sethi.p %hi(__kernel_current_task),gr29
        setlo   %lo(__kernel_current_task),gr29
        ldi     @(gr29,#0),gr29
        rett    #0
        beq     icc0,#3,0    /* prevent icache prefetch */

        # the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
        # appropriate page table and map that instead
        # - access the PGD with EAR0[31:26]
        # - DAMLR3 points to the virtual address of the page directory
        # - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__dtlb_k_PTD_miss:
        srli    gr29,#26,gr31    /* calculate PGE offset */
        slli    gr31,#8,gr31    /* and clear bottom bits */

        movsg   damlr3,gr30
        ld      @(gr31,gr30),gr30    /* access the PGE */

        andicc.p gr30,#_PAGE_PRESENT,gr0,icc0
        andicc  gr30,#xAMPRx_SS,gr0,icc1

        # map this PTD instead and record coverage address
        ori.p   gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
        beq     icc0,#0,__tlb_kernel_fault    /* jump if PGE not present */
        slli.p  gr31,#18,gr31
        bne     icc1,#0,__dtlb_k_bigpage
        movgs   gr30,dampr5
        movgs   gr31,scr1

        # we can now resume normal service
        setlos  0x3ffc,gr30
        srli.p  gr29,#12,gr31    /* use EAR0[25:14] as PTE index */
        bra     __dtlb_k_PTD_mapped

__dtlb_k_bigpage:
        break
        nop

        .size   __entry_kernel_data_tlb_miss, .-__entry_kernel_data_tlb_miss

###############################################################################
#
# Userspace instruction TLB miss handler (with PGE prediction)
# On entry:
#   GR28  - faulting address
#   GR31  - EAR0 ^ SCR0
#   SCR0  - base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR4 - mapped page table as matched by SCR0
#
###############################################################################
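#
# As in the kernel handlers, the fault address is checked against the cached
# PGE (SCR0/DAMR4), the PTE is fetched and marked accessed, and the result is
# loaded into IAMR1/DAMR1; the main differences are that GR28 carries the
# faulting address, failures go to __tlb_user_fault, and a valid DAMR1 entry
# is punted without the tlbpr-delete step used on the kernel side.
#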
        .globl  __entry_user_insn_tlb_miss
        .type   __entry_user_insn_tlb_miss,@function
__entry_user_insn_tlb_miss:
#if 0
        sethi.p %hi(0xe1200004),gr30
        setlo   %lo(0xe1200004),gr30
        st      gr0,@(gr30,gr0)
        sethi.p %hi(0xffc00100),gr30
        setlo   %lo(0xffc00100),gr30
        sth     gr30,@(gr30,gr0)
        membar
#endif

        movsg   ccr,gr30    /* save CCR */
        movgs   gr30,scr2

        # see if the cached page table mapping is appropriate
        srlicc.p gr31,#26,gr0,icc0
        setlos  0x3ffc,gr30
        srli.p  gr28,#12,gr31    /* use EAR0[25:14] as PTE index */
        bne     icc0,#0,__itlb_u_PTD_miss

__itlb_u_PTD_mapped:
        # access the PTD with EAR0[25:14]
        # - DAMLR4 points to the virtual address of the appropriate page table
        # - the PTD holds 4096 PTEs
        # - the PTD must be accessed uncached
        # - the PTE must be marked accessed if it was valid
        #
        and     gr31,gr30,gr31
        movsg   damlr4,gr30
        add     gr30,gr31,gr31
        ldi     @(gr31,#0),gr30    /* fetch the PTE */
        andicc  gr30,#_PAGE_PRESENT,gr0,icc0
        ori.p   gr30,#_PAGE_ACCESSED,gr30
        beq     icc0,#0,__tlb_user_fault    /* jump if PTE invalid */
        sti.p   gr30,@(gr31,#0)    /* update the PTE */
        andi    gr30,#~_PAGE_ACCESSED,gr30

        # we're using IAMR1/DAMR1 as an extra TLB entry
        # - punt the entry here (if valid) to the real TLB and then replace with the new PTE
        movsg   dampr1,gr31
        andicc  gr31,#xAMPRx_V,gr0,icc0
        setlos.p 0xfffff000,gr31
        beq     icc0,#0,__itlb_u_nopunt    /* punt not required */

        movsg   dampr1,gr31
        movgs   gr31,tppr
        movsg   damlr1,gr31    /* set TPLR.CXN */
        movgs   gr31,tplr
        tlbpr   gr31,gr0,#2,#0    /* save to the TLB */

        movsg   tpxr,gr31    /* check the TLB write error flag */
        andicc.p gr31,#TPXR_E,gr0,icc0
        setlos  #0xfffff000,gr31
        bne     icc0,#0,__tlb_user_fault

__itlb_u_nopunt:

        # assemble the new TLB entry
        and     gr28,gr31,gr28
        movsg   cxnr,gr31
        or      gr28,gr31,gr28
        movgs   gr28,iamlr1    /* xAMLR = address | context number */
        movgs   gr30,iampr1
        movgs   gr28,damlr1
        movgs   gr30,dampr1

        # return, restoring registers
        movsg   scr2,gr30
        movgs   gr30,ccr
        rett    #0
        beq     icc0,#3,0    /* prevent icache prefetch */

        # the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
        # appropriate page table and map that instead
        # - access the PGD with EAR0[31:26]
        # - DAMLR3 points to the virtual address of the page directory
        # - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__itlb_u_PTD_miss:
        srli    gr28,#26,gr31    /* calculate PGE offset */
        slli    gr31,#8,gr31    /* and clear bottom bits */

        movsg   damlr3,gr30
        ld      @(gr31,gr30),gr30    /* access the PGE */

        andicc.p gr30,#_PAGE_PRESENT,gr0,icc0
        andicc  gr30,#xAMPRx_SS,gr0,icc1

        # map this PTD instead and record coverage address
        ori.p   gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
        beq     icc0,#0,__tlb_user_fault    /* jump if PGE not present */
        slli.p  gr31,#18,gr31
        bne     icc1,#0,__itlb_u_bigpage
        movgs   gr30,dampr4
        movgs   gr31,scr0

        # we can now resume normal service
        setlos  0x3ffc,gr30
        srli.p  gr28,#12,gr31    /* use EAR0[25:14] as PTE index */
        bra     __itlb_u_PTD_mapped

__itlb_u_bigpage:
        break
        nop

        .size   __entry_user_insn_tlb_miss, .-__entry_user_insn_tlb_miss

###############################################################################
#
# Userspace data TLB miss handler
# On entry:
#   GR28  - faulting address
#   GR31  - EAR0 ^ SCR1
#   SCR1  - base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR5 - mapped page table as matched by SCR1
#
###############################################################################
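#
# Same pattern as the userspace instruction-side handler, but keyed on
# SCR1/DAMR5; on a PTD miss it first consults SCR0 in case the insn PGE cache
# already covers the faulting address (__dtlb_u_PTD_miss below) before falling
# back to a page-directory walk.
#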
        .globl  __entry_user_data_tlb_miss
        .type   __entry_user_data_tlb_miss,@function
__entry_user_data_tlb_miss:
#if 0
        sethi.p %hi(0xe1200004),gr30
        setlo   %lo(0xe1200004),gr30
        st      gr0,@(gr30,gr0)
        sethi.p %hi(0xffc00100),gr30
        setlo   %lo(0xffc00100),gr30
        sth     gr30,@(gr30,gr0)
        membar
#endif

        movsg   ccr,gr30    /* save CCR */
        movgs   gr30,scr2

        # see if the cached page table mapping is appropriate
        srlicc.p gr31,#26,gr0,icc0
        setlos  0x3ffc,gr30
        srli.p  gr28,#12,gr31    /* use EAR0[25:14] as PTE index */
        bne     icc0,#0,__dtlb_u_PTD_miss

__dtlb_u_PTD_mapped:
        # access the PTD with EAR0[25:14]
        # - DAMLR5 points to the virtual address of the appropriate page table
        # - the PTD holds 4096 PTEs
        # - the PTD must be accessed uncached
        # - the PTE must be marked accessed if it was valid
        #
        and     gr31,gr30,gr31
        movsg   damlr5,gr30

__dtlb_u_using_iPTD:
        add     gr30,gr31,gr31
        ldi     @(gr31,#0),gr30    /* fetch the PTE */
        andicc  gr30,#_PAGE_PRESENT,gr0,icc0
        ori.p   gr30,#_PAGE_ACCESSED,gr30
        beq     icc0,#0,__tlb_user_fault    /* jump if PTE invalid */
        sti.p   gr30,@(gr31,#0)    /* update the PTE */
        andi    gr30,#~_PAGE_ACCESSED,gr30

        # we're using DAMR1 as an extra TLB entry
        # - punt the entry here (if valid) to the real TLB and then replace with the new PTE
        movsg   dampr1,gr31
        andicc  gr31,#xAMPRx_V,gr0,icc0
        setlos.p 0xfffff000,gr31
        beq     icc0,#0,__dtlb_u_nopunt    /* punt not required */

        movsg   dampr1,gr31
        movgs   gr31,tppr
        movsg   damlr1,gr31    /* set TPLR.CXN */
        movgs   gr31,tplr
        tlbpr   gr31,gr0,#2,#0    /* save to the TLB */

        movsg   tpxr,gr31    /* check the TLB write error flag */
        andicc.p gr31,#TPXR_E,gr0,icc0
        setlos  #0xfffff000,gr31
        bne     icc0,#0,__tlb_user_fault

__dtlb_u_nopunt:

        # assemble the new TLB entry
        and     gr28,gr31,gr28
        movsg   cxnr,gr31
        or      gr28,gr31,gr28
        movgs   gr28,iamlr1    /* xAMLR = address | context number */
        movgs   gr30,iampr1
        movgs   gr28,damlr1
        movgs   gr30,dampr1

        # return, restoring registers
        movsg   scr2,gr30
        movgs   gr30,ccr
        rett    #0
        beq     icc0,#3,0    /* prevent icache prefetch */

        # the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
        # appropriate page table and map that instead
        # - first of all, check the insn PGE cache - we may well get a hit there
        # - access the PGD with EAR0[31:26]
        # - DAMLR3 points to the virtual address of the page directory
        # - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__dtlb_u_PTD_miss:
        movsg   scr0,gr31    /* consult the insn-PGE-cache key */
        xor     gr28,gr31,gr31
        srlicc  gr31,#26,gr0,icc0
        srli    gr28,#12,gr31    /* use EAR0[25:14] as PTE index */
        bne     icc0,#0,__dtlb_u_iPGE_miss

        # what we're looking for is covered by the insn-PGE-cache
        setlos  0x3ffc,gr30
        and     gr31,gr30,gr31
        movsg   damlr4,gr30
        bra     __dtlb_u_using_iPTD

__dtlb_u_iPGE_miss:
        srli    gr28,#26,gr31    /* calculate PGE offset */
        slli    gr31,#8,gr31    /* and clear bottom bits */

        movsg   damlr3,gr30
        ld      @(gr31,gr30),gr30    /* access the PGE */

        andicc.p gr30,#_PAGE_PRESENT,gr0,icc0
        andicc  gr30,#xAMPRx_SS,gr0,icc1

        # map this PTD instead and record coverage address
        ori.p   gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
        beq     icc0,#0,__tlb_user_fault    /* jump if PGE not present */
        slli.p  gr31,#18,gr31
        bne     icc1,#0,__dtlb_u_bigpage
        movgs   gr30,dampr5
        movgs   gr31,scr1

        # we can now resume normal service
        setlos  0x3ffc,gr30
        srli.p  gr28,#12,gr31    /* use EAR0[25:14] as PTE index */
        bra     __dtlb_u_PTD_mapped

__dtlb_u_bigpage:
        break
        nop

        .size   __entry_user_data_tlb_miss, .-__entry_user_data_tlb_miss