/*
 * arch/arm/include/asm/tlb.h
 *
 * Copyright (C) 2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Experimentation shows that on a StrongARM, it appears to be faster
 * to use the "invalidate whole tlb" rather than "invalidate single
 * tlb" for this.
 *
 * This appears true for both the process fork+exit case, as well as
 * the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>

#ifndef CONFIG_MMU

#include <linux/pagemap.h>

#define tlb_flush(tlb)	((void) tlb)

#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define MMU_GATHER_BUNDLE	8
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
static inline void __tlb_remove_table(void *_table)
{
	free_page_and_swap_cache((struct page *)_table);
}

struct mmu_table_batch {
	struct rcu_head		rcu;
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#define tlb_remove_entry(tlb, entry)	tlb_remove_table(tlb, entry)
#else
#define tlb_remove_entry(tlb, entry)	tlb_remove_page(tlb, entry)
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
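/*
 * Illustrative sketch, not part of this header: roughly how the generic
 * tlb_remove_table() in mm/memory.c of this era drives the batch above.
 * Table pages are queued in an mmu_table_batch and freed via RCU, so a
 * concurrent lockless page-table walker never sees a freed table:
 *
 *	void tlb_remove_table(struct mmu_gather *tlb, void *table)
 *	{
 *		struct mmu_table_batch **batch = &tlb->batch;
 *
 *		// (simplified: on-demand allocation and the no-concurrent-
 *		// walker fast path are omitted here)
 *		(*batch)->tables[(*batch)->nr++] = table;
 *		if ((*batch)->nr == MAX_TABLE_BATCH)
 *			tlb_table_flush(tlb);
 *	}
 *
 * The RCU callback eventually calls __tlb_remove_table() on each entry.
 */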
/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
	struct mm_struct	*mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch	*batch;
	unsigned int		need_flush;
#endif
	unsigned int		fullmm;		/* non-zero when tearing down the whole mm */
	struct vm_area_struct	*vma;		/* current VMA, or NULL (see below) */
	unsigned long		start, end;	/* bounds passed to tlb_gather_mmu() */
	unsigned long		range_start;	/* accumulated range still to be flushed */
	unsigned long		range_end;
	unsigned int		nr;		/* pages currently batched in pages[] */
	unsigned int		max;		/* capacity of pages[] */
	struct page		**pages;
	struct page		*local[MMU_GATHER_BUNDLE];	/* on-stack fallback batch */
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
/*
 * This is unnecessarily complex.  There are three ways the TLB shootdown
 * code is used:
 *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
 *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.
 *  2. Unmapping all vmas.  See exit_mmap().
 *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.  Additionally, page tables will be freed.
 *  3. Unmapping argument pages.  See shift_arg_pages().
 *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
 *     tlb->vma will be NULL.
 */
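/*
 * Illustrative sketch, not part of this header: the shape of a typical
 * case-1 caller in mm core, assuming the unmap path of this kernel era:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);	// fullmm == 0 here
 *	tlb_start_vma(&tlb, vma);		// flush caches, arm the range
 *	for each pte in [start, end):		// (pseudocode)
 *		tlb_remove_tlb_entry(&tlb, ptep, addr);
 *		tlb_remove_page(&tlb, page);
 *	tlb_end_vma(&tlb, vma);			// flush the gathered TLB range
 *	tlb_finish_mmu(&tlb, start, end);	// final flush + free batched pages
 */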
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || !tlb->vma)
		flush_tlb_mm(tlb->mm);
	else if (tlb->range_end > 0) {
		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
	if (!tlb->fullmm) {
		if (addr < tlb->range_start)
			tlb->range_start = addr;
		if (addr + PAGE_SIZE > tlb->range_end)
			tlb->range_end = addr + PAGE_SIZE;
	}
}
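/*
 * Worked example (illustrative, assuming 4K pages): unmapping pages at
 * 0x8000, 0x9000 and 0xb000 calls tlb_add_flush() three times, growing
 * the pending range to [0x8000, 0xc000).  tlb_flush() then issues a
 * single flush_tlb_range() over that span instead of three single-page
 * flushes; the hole at 0xa000 is flushed harmlessly as part of the range.
 */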
static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	/*
	 * Opportunistically upgrade from the small on-stack bundle to a
	 * full page of pointers.  GFP_NOWAIT: this path must not sleep,
	 * and failure is fine - we simply keep using tlb->local[].
	 */
	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(struct page *);
	}
}
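/*
 * Capacity arithmetic (illustrative): with 4K pages and 4-byte pointers,
 * as on typical 32-bit ARM, the upgraded batch holds 4096 / 4 = 1024
 * pages per flush, versus MMU_GATHER_BUNDLE (8) entries in the on-stack
 * fallback array.
 */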
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	tlb_flush(tlb);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb_table_flush(tlb);
#endif
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	free_pages_and_swap_cache(tlb->pages, tlb->nr);
	tlb->nr = 0;
	if (tlb->pages == tlb->local)
		__tlb_alloc_page(tlb);
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}
static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	/* fullmm only when tearing down the whole space: start == 0, end == -1 */
	tlb->fullmm = !(start | (end + 1));
	tlb->start = start;
	tlb->end = end;
	tlb->vma = NULL;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
	__tlb_alloc_page(tlb);

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb->batch = NULL;
#endif
}
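/*
 * Worked example (illustrative): exit_mmap() tears down the whole address
 * space and passes (0, -1), so start | (end + 1) == 0 | 0 and fullmm == 1;
 * a munmap() path passes real bounds, the expression is non-zero, and
 * fullmm == 0, enabling ranged flushes.
 */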
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}
/*
 * Memorize the range for the TLB flush.
 */
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm) {
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
		tlb->vma = vma;
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		tlb_flush(tlb);
}
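/*
 * Note (illustrative, case 3 above): shift_arg_pages() never calls
 * tlb_start_vma(), so tlb->vma stays NULL and tlb_flush() falls back to
 * the whole-mm flush_tlb_mm() rather than a ranged flush.
 */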
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (tlb->nr == tlb->max)
		return true;
	tlb->pages[tlb->nr++] = page;
	return false;
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (__tlb_remove_page(tlb, page)) {
		/*
		 * The batch was full: flush it, then retry.  The retry
		 * cannot fail, since tlb_flush_mmu() resets tlb->nr to 0.
		 */
		tlb_flush_mmu(tlb);
		__tlb_remove_page(tlb, page);
	}
}

/*
 * ARM batches all pages the same way, so the size/pte variants simply
 * alias the plain helpers and ignore page_size.
 */
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
					 struct page *page)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	return tlb_remove_page(tlb, page);
}
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				  unsigned long addr)
{
	pgtable_page_dtor(pte);

#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
#else
	/*
	 * With the classic ARM MMU, a pte page has two corresponding pmd
	 * entries, each covering 1MB.  Flush one address in each half so
	 * the gathered range spans both of them.
	 */
	addr &= PMD_MASK;
	tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
	tlb_add_flush(tlb, addr + SZ_1M);
#endif

	tlb_remove_entry(tlb, pte);
}
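/*
 * Worked example (illustrative, classic MMU, SZ_1M == 0x100000): for
 * addr == 0x00345000, addr &= PMD_MASK gives 0x00200000; the two calls
 * then mark 0x002ff000 and 0x00300000, so the gathered range straddles
 * the 1MB boundary and the eventual flush_tlb_range() touches a page in
 * each of the two pmd entries backed by this pte page.
 */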
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long addr)
{
#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
	tlb_remove_entry(tlb, virt_to_page(pmdp));
#else
	/* Without LPAE the pmd level is folded into the pgd: nothing to free. */
#endif
}

static inline void
tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr)	__pmd_free_tlb(tlb, pmdp, addr)
#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)

#define tlb_migrate_finish(mm)		do { } while (0)

#endif /* CONFIG_MMU */
#endif /* __ASMARM_TLB_H */