/*
 * Low-level SLB routines
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 *
 * Based on earlier C version:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 * Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/firmware.h>
/* void slb_allocate_realmode(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 *	r3 = faulting address, r13 = PACA
 *	r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 */
_GLOBAL(slb_allocate_realmode)
	/* r3 = faulting address */

	srdi	r9,r3,60		/* get region */
	srdi	r10,r3,28		/* get esid */
	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */
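	/* Note on the EA decode above: bits 60-63 select the region and
	 * EA >> 28 is the 256M segment index (ESID).  Region 0xc is
	 * PAGE_OFFSET (the linear mapping); the other region values
	 * tested below (0xf vmemmap, the vmalloc/io region) depend on
	 * the kernel's address-space layout for this version.
	 */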
	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
	blt	cr7,0f			/* user or kernel? */

	/* kernel address: proto-VSID = ESID */
	/* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
	 * this code will generate the proto-VSID 0xfffffffff for the
	 * top segment.  That's ok, the scramble below will translate
	 * it to VSID 0, which is reserved as a bad VSID - one which
	 * will never have any pages in it.
	 */

	/* Check if hitting the linear mapping or some other kernel space */
	bne	cr7,1f

	/* Linear mapping encoding bits, the "li" instruction below will
	 * be patched by the kernel at boot
	 */
_GLOBAL(slb_miss_kernel_load_linear)
	li	r11,0
BEGIN_FTR_SECTION
	b	slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
	b	slb_finish_load_1T

1:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Check virtual memmap region. To be patched at kernel boot */
	cmpldi	cr0,r9,0xf
	bne	1f
_GLOBAL(slb_miss_kernel_load_vmemmap)
	li	r11,0
	b	6f
1:
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

	/* vmalloc mapping gets the encoding from the PACA as the mapping
	 * can be demoted from 64K -> 4K dynamically on some machines
	 */
	clrldi	r11,r10,48
	cmpldi	r11,(VMALLOC_SIZE >> 28) - 1
	bgt	5f
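	/* The clrldi above keeps the low 16 bits of the ESID, i.e. the
	 * 256M segment index within the region; segments past
	 * VMALLOC_SIZE belong to the ioremap area and take the patched
	 * encoding at 5: instead of the vmalloc sllp from the PACA.
	 */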
	lhz	r11,PACAVMALLOCSLLP(r13)
	b	6f
5:
	/* IO mapping */
_GLOBAL(slb_miss_kernel_load_io)
	li	r11,0
6:
BEGIN_FTR_SECTION
	b	slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
	b	slb_finish_load_1T

0:	/* user address: proto-VSID = context << 15 | ESID. First check
	 * if the address is within the boundaries of the user region
	 */
	srdi.	r9,r10,USER_ESID_BITS
	bne-	8f			/* invalid ea bits set */

	/* when using slices, we extract the psize off the slice bitmaps
	 * and then we need to get the sllp encoding off the mmu_psize_defs
	 * array.
	 *
	 * XXX This is a bit inefficient especially for the normal case,
	 * so we should try to implement a fast path for the standard page
	 * size using the old sllp value so we avoid the array.  We cannot
	 * really do dynamic patching unfortunately as processes might flip
	 * between 4k and 64k standard page size
	 */
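	/* Sketch of the lookup below (when CONFIG_PPC_MM_SLICES is set):
	 * each slice's psize is a 4-bit field in the PACA slice words, so
	 * the shift amount is (slice index * 4); the extracted nibble is
	 * then scaled by MMUPSIZEDEFSIZE to index mmu_psize_defs[psize]
	 * and fetch that page size's sllp encoding.
	 */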
#ifdef CONFIG_PPC_MM_SLICES
	cmpldi	r10,16

	/* Get the slice index * 4 in r11 and matching slice size mask in r9 */
	ld	r9,PACALOWSLICESPSIZE(r13)
	sldi	r11,r10,2
	blt	5f
	ld	r9,PACAHIGHSLICEPSIZE(r13)
	srdi	r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT - 2)
	andi.	r11,r11,0x3c

5:	/* Extract the psize and multiply to get an array offset */
	srd	r9,r9,r11
	andi.	r9,r9,0xf
	mulli	r9,r9,MMUPSIZEDEFSIZE

	/* Now get to the array and obtain the sllp
	 */
	ld	r11,PACATOC(r13)
	ld	r11,mmu_psize_defs@got(r11)
	add	r11,r11,r9
	ld	r11,MMUPSIZESLLP(r11)
	ori	r11,r11,SLB_VSID_USER
#else
	/* paca context sllp already contains the SLB_VSID_USER bits */
	lhz	r11,PACACONTEXTSLLP(r13)
#endif /* CONFIG_PPC_MM_SLICES */

	ld	r9,PACACONTEXTID(r13)
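	/* 1T check below: ESIDs of 0x1000 and up (EAs at or above 1T) use
	 * 1T segments when the CPU supports them; the compare result
	 * feeds the bge to slb_finish_load_1T once the proto-VSID has
	 * been assembled.
	 */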
BEGIN_FTR_SECTION
	cmpldi	r10,0x1000
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
	rldimi	r10,r9,USER_ESID_BITS,0
BEGIN_FTR_SECTION
	bge	slb_finish_load_1T
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
	b	slb_finish_load

8:	/* invalid EA */
	li	r10,0			/* BAD_VSID */
	li	r11,SLB_VSID_USER	/* flags don't much matter */
	b	slb_finish_load
#ifdef __DISABLED__

/* void slb_allocate_user(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 *	r3 = faulting address, r13 = PACA
 *	r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 *
 * It is called with translation enabled in order to be able to walk the
 * page tables. This is not currently used.
 */
_GLOBAL(slb_allocate_user)
	/* r3 = faulting address */
	srdi	r10,r3,28		/* get esid */

	crset	4*cr7+lt		/* set "user" flag for later */

	/* check if we fit in the range covered by the pagetables */
	srdi.	r9,r3,PGTABLE_EADDR_SIZE
	crnot	4*cr0+eq,4*cr0+eq
	beqlr

	/* now we need to get to the page tables in order to get the page
	 * size encoding from the PMD. In the future, we'll be able to deal
	 * with 1T segments too by getting the encoding from the PGD instead
	 */
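	/* The rlwinm rotate-and-mask operations below turn the ESID into
	 * a byte offset (index * sizeof(entry)) into the PGD and then the
	 * PMD, so each ldx fetches the next level's table entry directly;
	 * a zero entry at any level means no mapping, so bail out.
	 */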
	ld	r9,PACAPGDIR(r13)
	cmpldi	cr0,r9,0
	beqlr

	rlwinm	r11,r10,8,25,28
	ldx	r9,r9,r11		/* get pgd_t */
	cmpldi	cr0,r9,0
	beqlr

	rlwinm	r11,r10,3,17,28
	ldx	r9,r9,r11		/* get pmd_t */
	cmpldi	cr0,r9,0
	beqlr

	/* build vsid flags */
	andi.	r11,r9,SLB_VSID_LLP
	ori	r11,r11,SLB_VSID_USER

	/* get context to calculate proto-VSID */
	ld	r9,PACACONTEXTID(r13)
	rldimi	r10,r9,USER_ESID_BITS,0

	/* fall through to slb_finish_load */

#endif /* __DISABLED__ */

/*
 * Finish loading of an SLB entry and return
 *
 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
 */
slb_finish_load:
	ASM_VSID_SCRAMBLE(r10,r9,256M)
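	/* ASM_VSID_SCRAMBLE is a multiplicative hash reduced modulo
	 * 2^36 - 1 (for 256M segments), spreading proto-VSIDs across the
	 * VSID space; this is why the top kernel proto-VSID 0xfffffffff
	 * from the MAGIC comment above folds to the reserved VSID 0.
	 */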
	rldimi	r11,r10,SLB_VSID_SHIFT,16	/* combine VSID and flags */

	/* r3 = EA, r11 = VSID data */
	/*
	 * Find a slot, round robin. Previously we tried to find a
	 * free slot first but that took too long. Unfortunately we
	 * don't have any LRU information to help us choose a slot.
	 */
7:	ld	r10,PACASTABRR(r13)
	addi	r10,r10,1
	/* This gets soft patched on boot. */
_GLOBAL(slb_compare_rr_to_size)
	cmpldi	r10,0
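	/* The 0 above is patched at boot to the machine's real number of
	 * SLB entries, so the round-robin pointer wraps back to
	 * SLB_NUM_BOLTED: the first slots hold bolted entries that must
	 * never be cast out.
	 */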
	blt+	4f
	li	r10,SLB_NUM_BOLTED
4:
	std	r10,PACASTABRR(r13)

3:
	rldimi	r3,r10,0,36		/* r3 = EA[0:35] | entry */
	oris	r10,r3,SLB_ESID_V@h	/* r3 |= SLB_ESID_V */

	/* r3 = ESID data, r11 = VSID data */

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 */
	slbmte	r11,r10

	/* we're done for kernel addresses */
	crclr	4*cr0+eq		/* set result to "success" */
	bgelr	cr7

	/* Update the slb cache */
	lhz	r3,PACASLBCACHEPTR(r13)	/* offset = paca->slb_cache_ptr */
	cmpldi	r3,SLB_CACHE_ENTRIES
	bge	1f

	/* still room in the slb cache */
	sldi	r11,r3,1		/* r11 = offset * sizeof(u16) */
	rldicl	r10,r10,36,28		/* get low 16 bits of the ESID */
	add	r11,r11,r13		/* r11 = (u16 *)paca + offset */
	sth	r10,PACASLBCACHE(r11)	/* paca->slb_cache[offset] = esid */
	addi	r3,r3,1			/* offset++ */
	b	2f
1:	/* offset >= SLB_CACHE_ENTRIES */
	li	r3,SLB_CACHE_ENTRIES+1
2:
	sth	r3,PACASLBCACHEPTR(r13)	/* paca->slb_cache_ptr = offset */
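	/* The cache lets the context-switch path invalidate just the user
	 * SLB entries touched since the last switch (via slbie) instead
	 * of flushing the whole SLB; a ptr past SLB_CACHE_ENTRIES marks
	 * the cache as overflowed, forcing the full-flush fallback.
	 */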
	crclr	4*cr0+eq		/* set result to "success" */
	blr

/*
 * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
 *
 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
 */
slb_finish_load_1T:
	srdi	r10,r10,40-28		/* get 1T ESID */
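	/* The proto-VSID was built from a 256M ESID (EA >> 28); shifting
	 * right by 40-28 = 12 more bits converts it to the 1T ESID
	 * (EA >> 40) before the 1T variant of the scramble runs.
	 */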
	ASM_VSID_SCRAMBLE(r10,r9,1T)
	rldimi	r11,r10,SLB_VSID_SHIFT_1T,16	/* combine VSID and flags */
	li	r10,MMU_SEGSIZE_1T
	rldimi	r11,r10,SLB_VSID_SSIZE_SHIFT,0	/* insert segment size */

	/* r3 = EA, r11 = VSID data */
	clrrdi	r3,r3,SID_SHIFT_1T	/* clear out non-ESID bits */
	b	7b