/*
 * arch/arm/include/asm/tlb.h
 *
 * Copyright (C) 2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Experimentation shows that on a StrongARM, it appears to be faster
 * to use the "invalidate whole tlb" rather than "invalidate single
 * tlb" for this.
 *
 * This appears true for both the process fork+exit case, as well as
 * the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>

#ifndef CONFIG_MMU

#include <linux/pagemap.h>

#define tlb_flush(tlb)	((void) tlb)

#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define MMU_GATHER_BUNDLE	8

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
static inline void __tlb_remove_table(void *_table)
{
	free_page_and_swap_cache((struct page *)_table);
}

struct mmu_table_batch {
	struct rcu_head		rcu;
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
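
/*
 * Rough sizing sketch (illustrative; assumes 4KiB pages and 32-bit
 * pointers): sizeof(struct mmu_table_batch) is about 12 bytes (a
 * two-pointer rcu_head plus nr), so MAX_TABLE_BATCH works out to
 * roughly (4096 - 12) / 4 = 1021 table pointers per batch page.  The
 * exact value depends on structure padding.
 */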

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#define tlb_remove_entry(tlb, entry)	tlb_remove_table(tlb, entry)
#else
#define tlb_remove_entry(tlb, entry)	tlb_remove_page(tlb, entry)
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
	struct mm_struct	*mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch	*batch;
	unsigned int		need_flush;
#endif
	unsigned int		fullmm;
	struct vm_area_struct	*vma;
	unsigned long		start, end;
	unsigned long		range_start;
	unsigned long		range_end;
	unsigned int		nr;
	unsigned int		max;
	struct page		**pages;
	struct page		*local[MMU_GATHER_BUNDLE];
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * This is unnecessarily complex.  There are three ways the TLB shootdown
 * code is used:
 *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
 *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.
 *  2. Unmapping all vmas.  See exit_mmap().
 *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.  Additionally, page tables will be freed.
 *  3. Unmapping argument pages.  See shift_arg_pages().
 *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
 *     tlb->vma will be NULL.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || !tlb->vma)
		flush_tlb_mm(tlb->mm);
	else if (tlb->range_end > 0) {
		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}
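
/*
 * Illustrative mapping of the three cases above onto tlb_flush()
 * (a summary sketch, not normative):
 *
 *  1. range unmap:       fullmm == 0, vma != NULL -> flush_tlb_range()
 *     over [range_start, range_end), then the range is reset.
 *  2. exit_mmap():       fullmm == 1              -> flush_tlb_mm().
 *  3. shift_arg_pages(): fullmm == 0, vma == NULL -> flush_tlb_mm(),
 *     since no per-vma range is being tracked.
 */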

static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
	if (!tlb->fullmm) {
		if (addr < tlb->range_start)
			tlb->range_start = addr;
		if (addr + PAGE_SIZE > tlb->range_end)
			tlb->range_end = addr + PAGE_SIZE;
	}
}
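
/*
 * Worked example (illustrative, assuming 4KiB pages): starting from a
 * reset range of [TASK_SIZE, 0), the calls
 *
 *	tlb_add_flush(tlb, 0x8000);
 *	tlb_add_flush(tlb, 0xa000);
 *
 * leave range_start == 0x8000 and range_end == 0xb000: the range grows
 * to the union of all queued pages, which tlb_flush() then hands to
 * flush_tlb_range() in a single call.
 */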

static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(struct page *);
	}
}

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	tlb_flush(tlb);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb_table_flush(tlb);
#endif
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	free_pages_and_swap_cache(tlb->pages, tlb->nr);
	tlb->nr = 0;
	if (tlb->pages == tlb->local)
		__tlb_alloc_page(tlb);
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->fullmm = !(start | (end+1));
	tlb->start = start;
	tlb->end = end;
	tlb->vma = NULL;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
	__tlb_alloc_page(tlb);

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb->batch = NULL;
#endif
}
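
/*
 * Note on the fullmm test above (worked example; caller values as in
 * the generic mm code): exit_mmap() passes start = 0 and end = -1, so
 * (end + 1) wraps to 0 and !(start | (end + 1)) evaluates to 1.  Any
 * bounded unmap leaves at least one bit set and yields fullmm == 0.
 */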

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}
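
/*
 * Minimal lifecycle sketch (illustrative only; "mm", "start" and "end"
 * stand in for the caller's values):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	... unmap pages, queueing them via tlb_remove_page() ...
 *	tlb_finish_mmu(&tlb, start, end);
 *
 * tlb_finish_mmu() does the final flush, frees the queued pages and
 * releases the spill page allocated by __tlb_alloc_page().
 */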

/*
 * Memorize the range for the TLB flush.
 */
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

/*
 * With the tlb vma handling, we can optimise these away when we're
 * doing a full MM flush.  When we're doing a munmap, the vmas are
 * adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm) {
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
		tlb->vma = vma;
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		tlb_flush(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (tlb->nr == tlb->max)
		return true;
	tlb->pages[tlb->nr++] = page;
	return false;
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (__tlb_remove_page(tlb, page)) {
		tlb_flush_mmu(tlb);
		__tlb_remove_page(tlb, page);
	}
}
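
/*
 * Sketch of the per-vma pattern from the range-unmap path (case 1
 * above); illustrative only, the real loop lives in mm/memory.c:
 *
 *	tlb_start_vma(tlb, vma);
 *	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
 *		... clear the pte, pick up the backing page ...
 *		tlb_remove_tlb_entry(tlb, ptep, addr);
 *		tlb_remove_page(tlb, page);
 *	}
 *	tlb_end_vma(tlb, vma);
 *
 * tlb_remove_page() flushes the gather early when pages[] fills up, so
 * callers need not check __tlb_remove_page() themselves.
 */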

static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
					 struct page *page)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	return tlb_remove_page(tlb, page);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
	unsigned long addr)
{
	pgtable_page_dtor(pte);

#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
#else
	/*
	 * With the classic ARM MMU, a pte page has two corresponding pmd
	 * entries, each covering 1MB.
	 */
	addr &= PMD_MASK;
	tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
	tlb_add_flush(tlb, addr + SZ_1M);
#endif

	tlb_remove_entry(tlb, pte);
}
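
/*
 * Worked example for the classic-MMU branch (illustrative): a pte page
 * mapped at addr = 0x401000 backs the two pmd entries covering
 * 0x400000-0x5fffff.  After addr &= PMD_MASK (addr = 0x400000), the two
 * tlb_add_flush() calls grow the gather range to cover at least
 * 0x4ff000-0x500fff, one page in each 1MB section, so the eventual
 * flush_tlb_range() touches both pmd entries.
 */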

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long addr)
{
#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
	tlb_remove_entry(tlb, virt_to_page(pmdp));
#endif
}

static inline void
tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr)	__pmd_free_tlb(tlb, pmdp, addr)
#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)

#define tlb_migrate_finish(mm)		do { } while (0)

#endif /* CONFIG_MMU */
#endif