/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/tsb.h>
#include <asm/tlb.h>
#include <asm/oplib.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
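
/* A TSB entry is picked by hashing the virtual address: shift out the page
 * offset and mask with the (power-of-two) number of entries.  The tag
 * stored in an entry is the virtual address shifted down by 22 bits.
 */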
static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
{
	vaddr >>= hash_shift;
	return vaddr & (nentries - 1);
}

static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
	return (tag == (vaddr >> 22));
}

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */
void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, PAGE_SHIFT,
					      KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent->tag, v))
			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
	}
}
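
/* Flush one entry from a user TSB.  "tsb" is either the kernel virtual
 * address of the table or its physical address, depending on how the chip
 * addresses the TSB (see flush_tsb_user()).  The TLB batching code uses
 * bit 0 of a batched address as a flag, so it is cleared before hashing.
 */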
static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
				  unsigned long hash_shift,
				  unsigned long nentries)
{
	unsigned long tag, ent, hash;

	v &= ~0x1UL;
	hash = tsb_hash(v, hash_shift, nentries);
	ent = tsb + (hash * sizeof(struct tsb));
	tag = (v >> 22UL);

	tsb_flush(ent, tag);
}

static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
			    unsigned long tsb, unsigned long nentries)
{
	unsigned long i;

	for (i = 0; i < tb->tlb_nr; i++)
		__flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
}
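
/* Flush a batch of user addresses from this mm's TSB(s).  cheetah_plus
 * and sun4v (hypervisor) chips address the TSB physically, so the base is
 * converted with __pa(); older chips use the kernel virtual address.  The
 * huge page TSB, when it exists, is hashed with the huge page shift.
 */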
void flush_tsb_user(struct tlb_batch *tb)
{
	struct mm_struct *mm = tb->mm;
	unsigned long nentries, base, flags;

	spin_lock_irqsave(&mm->context.lock, flags);

	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		base = __pa(base);
	__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		__flush_tsb_one(tb, HPAGE_SHIFT, base, nentries);
	}
#endif
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
{
	unsigned long nentries, base, flags;

	spin_lock_irqsave(&mm->context.lock, flags);

	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		base = __pa(base);
	__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		__flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
	}
#endif
	spin_unlock_irqrestore(&mm->context.lock, flags);
}
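
/* Translate the kernel's base page size and huge page size configuration
 * into the page size index and mask values used in the hypervisor TSB
 * descriptors set up below.
 */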
#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K
#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_64K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_64K
#else
#error Broken base page size setting...
#endif

#ifdef CONFIG_HUGETLB_PAGE
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_64K
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_64K
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_512K
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_512K
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_4MB
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_4MB
#else
#error Broken huge page size setting...
#endif
#endif
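
/* Fill in the tsb_config block for one TSB: record the number of entries,
 * build the TSB register value (size field in the low bits plus either the
 * physical base or the mapped virtual base), and, for sun4v, the hypervisor
 * TSB descriptor (page size index/mask, associativity, entry count, base).
 */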
static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_block[tsb_idx].tsb_nentries =
		tsb_bytes / sizeof(struct tsb);

	switch (tsb_idx) {
	case MM_TSB_BASE:
		base = TSBMAP_8K_BASE;
		break;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	case MM_TSB_HUGE:
		base = TSBMAP_4M_BASE;
		break;
#endif
	default:
		BUG();
	}

	tte = pgprot_val(PAGE_KERNEL_LOCKED);
	tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
		base += (tsb_paddr & 8192);
#endif
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
		page_sz = 4 * 1024 * 1024;
		break;

	default:
		printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n",
		       current->comm, current->pid, tsb_bytes);
		do_exit(SIGSEGV);
	}
	tte |= pte_sz_bits(page_sz);

	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
		/* Physical mapping, no locked TLB entry for TSB. */
		tsb_reg |= tsb_paddr;

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
	} else {
		tsb_reg |= base;
		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
		tte |= (tsb_paddr & ~(page_sz - 1UL));

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
	}

	/* Setup the Hypervisor TSB descriptor. */
	if (tlb_type == hypervisor) {
		struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];

		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_idx = HV_PGSZ_IDX_BASE;
			break;
#ifdef CONFIG_HUGETLB_PAGE
		case MM_TSB_HUGE:
			hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->assoc = 1;
		hp->num_ttes = tsb_bytes / 16;
		hp->ctx_idx = 0;
		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_mask = HV_PGSZ_MASK_BASE;
			break;
#ifdef CONFIG_HUGETLB_PAGE
		case MM_TSB_HUGE:
			hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->tsb_base = tsb_paddr;
		hp->resv = 0;
	}
}
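
/* pgtable_cache backs page table page allocations.  User TSBs come from
 * the tsb_caches array: one kmem cache per supported size, eight
 * power-of-two sizes from 8KB through 1MB, each aligning its objects to
 * their size as setup_tsb_params() requires.
 */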
struct kmem_cache *pgtable_cache __read_mostly;

static struct kmem_cache *tsb_caches[8] __read_mostly;

static const char *tsb_cache_names[8] = {
	"tsb_8KB",
	"tsb_16KB",
	"tsb_32KB",
	"tsb_64KB",
	"tsb_128KB",
	"tsb_256KB",
	"tsb_512KB",
	"tsb_1MB",
};

void __init pgtable_cache_init(void)
{
	unsigned long i;

	pgtable_cache = kmem_cache_create("pgtable_cache",
					  PAGE_SIZE, PAGE_SIZE,
					  0,
					  _clear_page);
	if (!pgtable_cache) {
		prom_printf("pgtable_cache_init(): Could not create!\n");
		prom_halt();
	}

	for (i = 0; i < 8; i++) {
		unsigned long size = 8192 << i;
		const char *name = tsb_cache_names[i];

		tsb_caches[i] = kmem_cache_create(name,
						  size, size,
						  0, NULL);
		if (!tsb_caches[i]) {
			prom_printf("Could not create %s cache\n", name);
			prom_halt();
		}
	}
}

int sysctl_tsb_ratio = -2;
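
/* Convert a TSB size in bytes into the RSS value at which tsb_grow()
 * should move to the next larger size.  With the default ratio of -2 the
 * limit is num_ents - num_ents/4, i.e. 3/4 of the entries; a positive
 * ratio lets the RSS exceed the number of entries before growing.
 */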
static unsigned long tsb_size_to_rss_limit(unsigned long new_size)
{
	unsigned long num_ents = (new_size / sizeof(struct tsb));

	if (sysctl_tsb_ratio < 0)
		return num_ents - (num_ents >> -sysctl_tsb_ratio);
	else
		return num_ents + (num_ents >> sysctl_tsb_ratio);
}

/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
 * do_sparc64_fault() invokes this routine to try and grow it.
 *
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.  It also must be physically contiguous, so we
 * cannot use vmalloc().
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
{
	unsigned long max_tsb_size = 1 * 1024 * 1024;
	unsigned long new_size, old_size, flags;
	struct tsb *old_tsb, *new_tsb;
	unsigned long new_cache_index, old_cache_index;
	unsigned long new_rss_limit;
	gfp_t gfp_flags;

	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
		max_tsb_size = (PAGE_SIZE << MAX_ORDER);

	new_cache_index = 0;
	for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
		new_rss_limit = tsb_size_to_rss_limit(new_size);
		if (new_rss_limit > rss)
			break;
		new_cache_index++;
	}

	if (new_size == max_tsb_size)
		new_rss_limit = ~0UL;

retry_tsb_alloc:
	gfp_flags = GFP_KERNEL;
	if (new_size > (PAGE_SIZE * 2))
		gfp_flags = __GFP_NOWARN | __GFP_NORETRY;

	new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index],
					gfp_flags, numa_node_id());
	if (unlikely(!new_tsb)) {
		/* Not being able to fork due to a high-order TSB
		 * allocation failure is very bad behavior.  Just back
		 * down to a 0-order allocation and force no TSB
		 * growing for this address space.
		 */
		if (mm->context.tsb_block[tsb_index].tsb == NULL &&
		    new_cache_index > 0) {
			new_cache_index = 0;
			new_size = 8192;
			new_rss_limit = ~0UL;
			goto retry_tsb_alloc;
		}

		/* If we failed on a TSB grow, we are under serious
		 * memory pressure so don't try to grow any more.
		 */
		if (mm->context.tsb_block[tsb_index].tsb != NULL)
			mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
		return;
	}

	/* Mark all tags as invalid. */
	tsb_init(new_tsb, new_size);

	/* Ok, we are about to commit the changes.  If we are
	 * growing an existing TSB the locking is very tricky,
	 * so WATCH OUT!
	 *
	 * We have to hold mm->context.lock while committing to the
	 * new TSB, this synchronizes us with processors in
	 * flush_tsb_user() and switch_mm() for this address space.
	 *
	 * But even with that lock held, processors run asynchronously
	 * accessing the old TSB via TLB miss handling.  This is OK
	 * because those actions are just propagating state from the
	 * Linux page tables into the TSB, page table mappings are not
	 * being changed.  If a real fault occurs, the processor will
	 * synchronize with us when it hits flush_tsb_user(), this is
	 * also true for the case where vmscan is modifying the page
	 * tables.  The only thing we need to be careful with is to
	 * skip any locked TSB entries during copy_tsb().
	 *
	 * When we finish committing to the new TSB, we have to drop
	 * the lock and ask all other cpus running this address space
	 * to run tsb_context_switch() to see the new TSB table.
	 */
	spin_lock_irqsave(&mm->context.lock, flags);

	old_tsb = mm->context.tsb_block[tsb_index].tsb;
	old_cache_index =
		(mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
	old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
		    sizeof(struct tsb));

	/* Handle multiple threads trying to grow the TSB at the same time.
	 * One will get in here first, and bump the size and the RSS limit.
	 * The others will get in here next and hit this check.
	 */
	if (unlikely(old_tsb &&
		     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
		spin_unlock_irqrestore(&mm->context.lock, flags);

		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
		return;
	}

	mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;

	if (old_tsb) {
		extern void copy_tsb(unsigned long old_tsb_base,
				     unsigned long old_tsb_size,
				     unsigned long new_tsb_base,
				     unsigned long new_tsb_size);
		unsigned long old_tsb_base = (unsigned long) old_tsb;
		unsigned long new_tsb_base = (unsigned long) new_tsb;

		if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
			old_tsb_base = __pa(old_tsb_base);
			new_tsb_base = __pa(new_tsb_base);
		}
		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
	}

	mm->context.tsb_block[tsb_index].tsb = new_tsb;
	setup_tsb_params(mm, tsb_index, new_size);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	/* If old_tsb is NULL, we're being invoked for the first time
	 * from init_new_context().
	 */
	if (old_tsb) {
		/* Reload it on the local cpu. */
		tsb_context_switch(mm);

		/* Now force other processors to do the same. */
		preempt_disable();
		smp_tsb_sync(mm);
		preempt_enable();

		/* Now it is safe to free the old tsb. */
		kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
	}
}

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned long huge_pte_count;
#endif
	unsigned int i;

	spin_lock_init(&mm->context.lock);

	mm->context.sparc64_ctx_val = 0UL;

#ifdef CONFIG_HUGETLB_PAGE
	/* We reset it to zero because the fork() page copying
	 * will re-increment the counters as the parent PTEs are
	 * copied into the child address space.
	 */
	huge_pte_count = mm->context.huge_pte_count;
	mm->context.huge_pte_count = 0;
#endif

	/* copy_mm() copies over the parent's mm_struct before calling
	 * us, so we need to zero out the TSB pointer or else tsb_grow()
	 * will be confused and think there is an older TSB to free up.
	 */
	for (i = 0; i < MM_NUM_TSBS; i++)
		mm->context.tsb_block[i].tsb = NULL;

	/* If this is fork, inherit the parent's TSB size.  We would
	 * grow it to that size on the first page fault anyways.
	 */
	tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));

#ifdef CONFIG_HUGETLB_PAGE
	if (unlikely(huge_pte_count))
		tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
#endif

	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
		return -ENOMEM;

	return 0;
}
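
/* Free one TSB.  The low three bits of tsb_reg_val record the size index,
 * which selects the kmem cache the TSB was allocated from.
 */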
static void tsb_destroy_one(struct tsb_config *tp)
{
	unsigned long cache_index;

	if (!tp->tsb)
		return;
	cache_index = tp->tsb_reg_val & 0x7UL;
	kmem_cache_free(tsb_caches[cache_index], tp->tsb);
	tp->tsb = NULL;
	tp->tsb_reg_val = 0UL;
}
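
/* Tear down an address space: free its TSBs and, if a hardware context
 * number was assigned, return it to the global context bitmap.
 */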
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags, i;

	for (i = 0; i < MM_NUM_TSBS; i++)
		tsb_destroy_one(&mm->context.tsb_block[i]);

	spin_lock_irqsave(&ctx_alloc_lock, flags);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);
		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
	}

	spin_unlock_irqrestore(&ctx_alloc_lock, flags);
}