mmu_context_book3s64.c

/*
 *  MMU context allocation for 64-bit kernels.
 *
 *  Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "icswx.h"

static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida);
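
/*
 * Allocate a fresh context id from mmu_context_ida.  Ids start at 1
 * because id 0 is treated as "not yet initialized" by
 * init_new_context() below.  The old pre-get/get-new IDA API may ask
 * us to retry with -EAGAIN once it has preallocated more memory,
 * hence the loop.  Returns the new id, or a negative errno on failure.
 */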
int __init_new_context(void)
{
	int index;
	int err;

again:
	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = ida_get_new_above(&mmu_context_ida, 1, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > MAX_USER_CONTEXT) {
		/* Out of usable hardware context ids: give it back and fail. */
		spin_lock(&mmu_context_lock);
		ida_remove(&mmu_context_ida, index);
		spin_unlock(&mmu_context_lock);
		return -ENOMEM;
	}

	return index;
}
EXPORT_SYMBOL_GPL(__init_new_context);
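
/*
 * On radix, the hardware walks per-process page tables via the process
 * table.  Point this context's process table entry at the mm's PGD,
 * along with the root tree size (RTS) and PGD index size fields.
 */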
static int radix__init_new_context(struct mm_struct *mm, int index)
{
	unsigned long rts_field;

	/*
	 * Set the process table entry.
	 */
	rts_field = radix__get_tree_size();
	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);

	return 0;
}
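
/*
 * Allocate and initialize a context for a new mm.  Radix only needs
 * its process table entry set up; hash additionally initializes the
 * slice map and subpage protection state.  Returns 0 on success or a
 * negative errno.
 */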
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	index = __init_new_context();
	if (index < 0)
		return index;

	if (radix_enabled()) {
		radix__init_new_context(mm, index);
	} else {
		/* The old code would re-promote on fork; we don't do that
		 * when using slices as it could cause problems promoting
		 * slices that have been forced down to 4K.
		 *
		 * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
		 * explicitly against context.id == 0. This ensures that we
		 * properly initialize context slice details for newly
		 * allocated mm's (which will have id == 0) and don't alter
		 * context slice inherited via fork (which will have
		 * id != 0).
		 *
		 * We should not be calling init_new_context() on init_mm.
		 * Hence a check against 0 is OK.
		 */
		if (mm->context.id == 0)
			slice_set_user_psize(mm, mmu_virtual_psize);
		subpage_prot_init_new_context(mm);
	}
	mm->context.id = index;

#ifdef CONFIG_PPC_ICSWX
	mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
	if (!mm->context.cop_lockp) {
		__destroy_context(index);
		subpage_prot_free(mm);
		mm->context.id = MMU_NO_CONTEXT;
		return -ENOMEM;
	}
	spin_lock_init(mm->context.cop_lockp);
#endif /* CONFIG_PPC_ICSWX */

#ifdef CONFIG_PPC_64K_PAGES
	mm->context.pte_frag = NULL;
#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_init(mm);
#endif
	return 0;
}
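
/*
 * Return a context id to the allocator so it can be reused.
 */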
void __destroy_context(int context_id)
{
	spin_lock(&mmu_context_lock);
	ida_remove(&mmu_context_ida, context_id);
	spin_unlock(&mmu_context_lock);
}
EXPORT_SYMBOL_GPL(__destroy_context);
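
/*
 * With 64K pages, PTE pages are handed out in smaller fragments and
 * mm->context.pte_frag points at the next unused fragment of the
 * current page.  The fragment offset within the page therefore tells
 * us how many fragments were handed out; the remaining references on
 * the page belong to fragments that were never used, so drop them
 * here and free the page if no handed-out fragment still holds one.
 */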
#ifdef CONFIG_PPC_64K_PAGES
static void destroy_pagetable_page(struct mm_struct *mm)
{
	int count;
	void *pte_frag;
	struct page *page;

	pte_frag = mm->context.pte_frag;
	if (!pte_frag)
		return;

	page = virt_to_page(pte_frag);
	/* drop all the pending references */
	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
	/* We allow PTE_FRAG_NR fragments from a PTE page */
	if (page_ref_sub_and_test(page, PTE_FRAG_NR - count)) {
		pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}
#else
static inline void destroy_pagetable_page(struct mm_struct *mm)
{
	return;
}
#endif
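
/*
 * Tear down a context when its mm goes away: release coprocessor
 * state, invalidate the radix process table entry (or free the hash
 * subpage protection table), free any leftover PTE fragment page and
 * finally return the context id.
 */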
void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
	WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
#endif
#ifdef CONFIG_PPC_ICSWX
	drop_cop(mm->context.acop, mm);
	kfree(mm->context.cop_lockp);
	mm->context.cop_lockp = NULL;
#endif /* CONFIG_PPC_ICSWX */

	if (radix_enabled()) {
		/*
		 * Radix doesn't have a valid bit in the process table
		 * entries. However we know that at least the P9
		 * implementation will avoid caching an entry with an
		 * invalid RTS field, and 0 is invalid. So this will do.
		 */
		process_tb[mm->context.id].prtb0 = 0;
	} else
		subpage_prot_free(mm);
	destroy_pagetable_page(mm);
	__destroy_context(mm->context.id);
	mm->context.id = MMU_NO_CONTEXT;
}
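
/*
 * Switch the hardware to another address space on radix: the PID SPR
 * selects which process table entry translations are walked from.
 * The isync before the mtspr completes preceding instructions before
 * the PID changes; the isync afterwards makes the new PID take effect
 * before the slbia, which invalidates implementation-specific
 * lookaside information so stale translations from the previous
 * context are not used.
 */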
#ifdef CONFIG_PPC_RADIX_MMU
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	asm volatile("isync": : :"memory");
	mtspr(SPRN_PID, next->context.id);
	asm volatile("isync \n"
		     PPC_SLBIA(0x7)
		     : : :"memory");
}
#endif