slb.c

/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 * Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <linux/compiler.h>
#include <asm/udbg.h>
#include <asm/code-patching.h>

extern void slb_allocate_realmode(unsigned long ea);
extern void slb_allocate_user(unsigned long ea);

static void slb_allocate(unsigned long ea)
{
	/* Currently, we do real mode for all SLBs including user, but
	 * that will change if we bring back dynamic VSIDs
	 */
	slb_allocate_realmode(ea);
}

#define slb_esid_mask(ssize)	\
	(((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)

static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
					 unsigned long slot)
{
	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
}

#define slb_vsid_shift(ssize)	\
	((ssize) == MMU_SEGSIZE_256M? SLB_VSID_SHIFT: SLB_VSID_SHIFT_1T)

static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
					 unsigned long flags)
{
	return (get_kernel_vsid(ea, ssize) << slb_vsid_shift(ssize)) | flags |
		((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
}
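
/*
 * Note: the two helpers above build the two doublewords of an SLB entry.
 * An entry is installed with "slbmte RS,RB": RS takes the VSID doubleword
 * from mk_vsid_data() (VSID, segment-size encoding, protection/LLP flags)
 * and RB takes the ESID doubleword from mk_esid_data() (ESID, valid bit
 * and the slot index to write).  A minimal sketch, mirroring what
 * create_shadowed_slbe() below does minus the shadow-buffer update:
 *
 *	asm volatile("slbmte %0,%1" :
 *		     : "r" (mk_vsid_data(ea, ssize, flags)),
 *		       "r" (mk_esid_data(ea, ssize, entry))
 *		     : "memory");
 */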

static inline void slb_shadow_update(unsigned long ea, int ssize,
				     unsigned long flags,
				     unsigned long entry)
{
	/*
	 * Clear the ESID first so the entry is not valid while we are
	 * updating it.  No write barriers are needed here, provided
	 * we only update the current CPU's SLB shadow buffer.
	 */
	get_slb_shadow()->save_area[entry].esid = 0;
	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, ssize, flags);
	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, ssize, entry);
}
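
/*
 * The shadow buffer duplicates the bolted SLB entries so that the
 * hypervisor can restore them on our behalf if it has to; see the
 * comment in create_shadowed_slbe() below about being preempted by PHYP.
 */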

static inline void slb_shadow_clear(unsigned long entry)
{
	get_slb_shadow()->save_area[entry].esid = 0;
}

static inline void create_shadowed_slbe(unsigned long ea, int ssize,
					unsigned long flags,
					unsigned long entry)
{
	/*
	 * Updating the shadow buffer before writing the SLB ensures
	 * we don't get a stale entry here if we get preempted by PHYP
	 * between these two statements.
	 */
	slb_shadow_update(ea, ssize, flags, entry);

	asm volatile("slbmte %0,%1" :
		     : "r" (mk_vsid_data(ea, ssize, flags)),
		       "r" (mk_esid_data(ea, ssize, entry))
		     : "memory" );
}

static void __slb_flush_and_rebolt(void)
{
	/* If you change this make sure you change SLB_NUM_BOLTED
	 * appropriately too. */
	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
	unsigned long ksp_esid_data, ksp_vsid_data;

	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	lflags = SLB_VSID_KERNEL | linear_llp;
	vflags = SLB_VSID_KERNEL | vmalloc_llp;

	ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, 2);
	if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
		ksp_esid_data &= ~SLB_ESID_V;
		ksp_vsid_data = 0;
		slb_shadow_clear(2);
	} else {
		/* Update stack entry; others don't change */
		slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
		ksp_vsid_data = get_slb_shadow()->save_area[2].vsid;
	}

	/* We need to do this all in asm, so we're sure we don't touch
	 * the stack between the slbia and rebolting it. */
	asm volatile("isync\n"
		     "slbia\n"
		     /* Slot 1 - first VMALLOC segment */
		     "slbmte	%0,%1\n"
		     /* Slot 2 - kernel stack */
		     "slbmte	%2,%3\n"
		     "isync"
		     :: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
			"r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, 1)),
			"r"(ksp_vsid_data),
			"r"(ksp_esid_data)
		     : "memory");
}
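
/*
 * The slot numbers used above follow the bolted layout established in
 * slb_initialize(): slot 0 is the kernel linear mapping (PAGE_OFFSET),
 * slot 1 the first vmalloc segment and slot 2 the kernel stack.  Only
 * slots 1 and 2 are rewritten after the slbia because slbia leaves SLB
 * entry 0 alone, which is also why slb_initialize() clears slot 0 with
 * an explicit slbmte of zero.
 */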

void slb_flush_and_rebolt(void)
{
	WARN_ON(!irqs_disabled());

	/*
	 * We can't take a PMU exception in the following code, so hard
	 * disable interrupts.
	 */
	hard_irq_disable();

	__slb_flush_and_rebolt();
	get_paca()->slb_cache_ptr = 0;
}

void slb_vmalloc_update(void)
{
	unsigned long vflags;

	vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
	slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
	slb_flush_and_rebolt();
}
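
/*
 * slb_vmalloc_update() matters when the page-size encoding (llp) used for
 * the vmalloc region changes at runtime (e.g. if mmu_vmalloc_psize is
 * demoted): the bolted entry in slot 1 was created with the old flags and
 * must be rewritten and re-installed.
 */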

/* Helper function to compare esids.  There are four cases to handle.
 * 1. The system is not 1T segment size capable.  Use the GET_ESID compare.
 * 2. The system is 1T capable, both addresses are < 1T, use the GET_ESID compare.
 * 3. The system is 1T capable, only one of the two addresses is > 1T.  This is not a match.
 * 4. The system is 1T capable, both addresses are > 1T, use the GET_ESID_1T macro to compare.
 */
static inline int esids_match(unsigned long addr1, unsigned long addr2)
{
	int esid_1t_count;

	/* System is not 1T segment size capable. */
	if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return (GET_ESID(addr1) == GET_ESID(addr2));

	esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) +
			 ((addr2 >> SID_SHIFT_1T) != 0));

	/* both addresses are < 1T */
	if (esid_1t_count == 0)
		return (GET_ESID(addr1) == GET_ESID(addr2));

	/* One address < 1T, the other > 1T.  Not a match */
	if (esid_1t_count == 1)
		return 0;

	/* Both addresses are > 1T. */
	return (GET_ESID_1T(addr1) == GET_ESID_1T(addr2));
}
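
/*
 * For example, with 256M segments (SID_SHIFT == 28), 0x10000000 and
 * 0x10008000 share ESID 0x1 and match, while 0x10000000 and 0x20000000
 * fall in different segments and do not.
 */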

/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long offset;
	unsigned long slbie_data = 0;
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long exec_base;

	/*
	 * We need interrupts hard-disabled here, not just soft-disabled,
	 * so that a PMU interrupt can't occur, which might try to access
	 * user memory (to get a stack trace) and possibly cause an SLB miss
	 * which would update the slb_cache/slb_cache_ptr fields in the PACA.
	 */
	hard_irq_disable();
	offset = get_paca()->slb_cache_ptr;
	if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
	    offset <= SLB_CACHE_ENTRIES) {
		int i;
		asm volatile("isync" : : : "memory");
		for (i = 0; i < offset; i++) {
			slbie_data = (unsigned long)get_paca()->slb_cache[i]
				<< SID_SHIFT; /* EA */
			slbie_data |= user_segment_size(slbie_data)
				<< SLBIE_SSIZE_SHIFT;
			slbie_data |= SLBIE_C; /* C set for user addresses */
			asm volatile("slbie %0" : : "r" (slbie_data));
		}
		asm volatile("isync" : : : "memory");
	} else {
		__slb_flush_and_rebolt();
	}

	/* Workaround POWER5 < DD2.1 issue */
	if (offset == 1 || offset > SLB_CACHE_ENTRIES)
		asm volatile("slbie %0" : : "r" (slbie_data));

	get_paca()->slb_cache_ptr = 0;
	get_paca()->context = mm->context;

	/*
	 * preload some userspace segments into the SLB.
	 * Almost all 32 and 64bit PowerPC executables are linked at
	 * 0x10000000 so it makes sense to preload this segment.
	 */
	exec_base = 0x10000000;

	if (is_kernel_addr(pc) || is_kernel_addr(stack) ||
	    is_kernel_addr(exec_base))
		return;

	slb_allocate(pc);

	if (!esids_match(pc, stack))
		slb_allocate(stack);

	if (!esids_match(pc, exec_base) &&
	    !esids_match(stack, exec_base))
		slb_allocate(exec_base);
}
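
/*
 * The esids_match() checks above keep us from preloading the same segment
 * twice, which would install duplicate SLB entries for a single ESID.
 */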

static inline void patch_slb_encoding(unsigned int *insn_addr,
				      unsigned int immed)
{
	int insn = (*insn_addr & 0xffff0000) | immed;
	patch_instruction(insn_addr, insn);
}
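
/*
 * patch_slb_encoding() rewrites only the low 16 bits (the immediate field)
 * of an instruction in the SLB miss handler assembly, e.g. the one loading
 * the VSID flags or the one comparing the round-robin pointer against
 * mmu_slb_size.  A hedged sketch of the effect on such an instruction:
 *
 *	unsigned int old = *slb_compare_rr_to_size;	// e.g. cmpldi rN,64
 *	unsigned int new = (old & 0xffff0000) | 32;	// now compares with 32
 *	patch_instruction(slb_compare_rr_to_size, new);
 */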

void slb_set_size(u16 size)
{
	extern unsigned int *slb_compare_rr_to_size;

	if (mmu_slb_size == size)
		return;

	mmu_slb_size = size;
	patch_slb_encoding(slb_compare_rr_to_size, mmu_slb_size);
}

void slb_initialize(void)
{
	unsigned long linear_llp, vmalloc_llp, io_llp;
	unsigned long lflags, vflags;
	static int slb_encoding_inited;
	extern unsigned int *slb_miss_kernel_load_linear;
	extern unsigned int *slb_miss_kernel_load_io;
	extern unsigned int *slb_compare_rr_to_size;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	extern unsigned int *slb_miss_kernel_load_vmemmap;
	unsigned long vmemmap_llp;
#endif

	/* Prepare our SLB miss handler based on our page size */
	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
	if (!slb_encoding_inited) {
		slb_encoding_inited = 1;
		patch_slb_encoding(slb_miss_kernel_load_linear,
				   SLB_VSID_KERNEL | linear_llp);
		patch_slb_encoding(slb_miss_kernel_load_io,
				   SLB_VSID_KERNEL | io_llp);
		patch_slb_encoding(slb_compare_rr_to_size,
				   mmu_slb_size);

		pr_devel("SLB: linear LLP = %04lx\n", linear_llp);
		pr_devel("SLB: io LLP = %04lx\n", io_llp);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
		patch_slb_encoding(slb_miss_kernel_load_vmemmap,
				   SLB_VSID_KERNEL | vmemmap_llp);
		pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
#endif
	}

	get_paca()->stab_rr = SLB_NUM_BOLTED;

	lflags = SLB_VSID_KERNEL | linear_llp;
	vflags = SLB_VSID_KERNEL | vmalloc_llp;

	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
	asm volatile("isync":::"memory");
	asm volatile("slbmte %0,%0"::"r" (0) : "memory");
	asm volatile("isync; slbia; isync":::"memory");
	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);
	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);

	/* For the boot cpu, we're running on the stack in init_thread_union,
	 * which is in the first segment of the linear mapping, and also
	 * get_paca()->kstack hasn't been initialized yet.
	 * For secondary cpus, we need to bolt the kernel stack entry now.
	 */
	slb_shadow_clear(2);
	if (raw_smp_processor_id() != boot_cpuid &&
	    (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
		create_shadowed_slbe(get_paca()->kstack,
				     mmu_kernel_ssize, lflags, 2);

	asm volatile("isync":::"memory");
}