#ifndef _ASM_IA64_TLB_H
#define _ASM_IA64_TLB_H
/*
 * Based on <asm-generic/tlb.h>.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
/*
 * Removing a translation from a page table (including TLB-shootdown) is a four-step
 * procedure:
 *
 *	(1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
 *	    (this is a no-op on ia64).
 *	(2) Clear the relevant portions of the page-table.
 *	(3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs.
 *	(4) Release the pages that were freed up in step (2).
 *
 * Note that the ordering of these steps is crucial to avoid races on MP machines.
 *
 * The Linux kernel defines several platform-specific hooks for TLB-shootdown.  When
 * unmapping a portion of the virtual address space, these hooks are called according to
 * the following template:
 *
 *	tlb <- tlb_gather_mmu(mm, start, end);		// start unmap for address space MM
 *	{
 *	  for each vma that needs a shootdown do {
 *	    tlb_start_vma(tlb, vma);
 *	      for each page-table-entry PTE that needs to be removed do {
 *		tlb_remove_tlb_entry(tlb, pte, address);
 *		if (pte refers to a normal page) {
 *		  tlb_remove_page(tlb, page);
 *		}
 *	      }
 *	    tlb_end_vma(tlb, vma);
 *	  }
 *	}
 *	tlb_finish_mmu(tlb, start, end);	// finish unmap for address space MM
 */
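
/*
 * Illustrative interleaving of why the ordering of steps (2)-(4) matters:
 * if a page were released (step 4) before the TLB flush (step 3), another
 * CPU could still use a stale translation to touch memory that has
 * already been handed to a new owner:
 *
 *	CPU 0					CPU 1
 *	-----					-----
 *	clear PTE (step 2)
 *	free page (step 4, too early)
 *						page gets reallocated
 *	flush TLBs (step 3, too late)		stale TLB entry still maps
 *						the old page; loads/stores
 *						through it corrupt the new
 *						owner's data
 */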
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/machvec.h>
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define IA64_GATHER_BUNDLE	8

struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		nr;
	unsigned int		max;
	unsigned char		fullmm;		/* non-zero means full mm flush */
	unsigned char		need_flush;	/* really unmapped some PTEs? */
	unsigned long		start, end;
	unsigned long		start_addr;
	unsigned long		end_addr;
	struct page		**pages;
	struct page		*local[IA64_GATHER_BUNDLE];
};
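
/*
 * Illustrative note on the batch storage: pages initially points at the
 * small on-stack local[] array; on the first gathered page,
 * __tlb_alloc_page() below tries to upgrade it to a full page worth of
 * pointers:
 *
 *	pages == local		-> room for IA64_GATHER_BUNDLE (8) pages
 *	pages == spare page	-> room for PAGE_SIZE/sizeof(void *) pages
 *
 * If that GFP_NOWAIT allocation fails, the code simply keeps using the
 * on-stack bundle and flushes more often.
 */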
struct ia64_tr_entry {
	u64 ifa;
	u64 itir;
	u64 pte;
	u64 rr;
}; /* record for a translation register (TR) entry */

extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
extern void ia64_ptr_entry(u64 target_mask, int slot);

extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
/*
 * Region register macros.
 */
#define RR_TO_VE(val)	(((val) >> 0) & 0x0000000000000001)
#define RR_VE(val)	(((val) & 0x0000000000000001) << 0)
#define RR_VE_MASK	0x0000000000000001L
#define RR_VE_SHIFT	0
#define RR_TO_PS(val)	(((val) >> 2) & 0x000000000000003f)
#define RR_PS(val)	(((val) & 0x000000000000003f) << 2)
#define RR_PS_MASK	0x00000000000000fcL
#define RR_PS_SHIFT	2
#define RR_RID_MASK	0x00000000ffffff00L
#define RR_TO_RID(val)	((val >> 8) & 0xffffff)
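
/*
 * Illustrative decode of a region register value (rr_value is a
 * hypothetical variable here, e.g. read via the ia64_get_rr() intrinsic):
 *
 *	u64 rr_value = ia64_get_rr(addr);
 *	int ve  = RR_TO_VE(rr_value);	// VHPT walker enable (bit 0)
 *	int ps  = RR_TO_PS(rr_value);	// log2 of preferred page size (bits 2-7)
 *	u32 rid = RR_TO_RID(rr_value);	// 24-bit region ID (bits 8-31)
 */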
static inline void
ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb->need_flush = 0;

	if (tlb->fullmm) {
		/*
		 * Tearing down the entire address space.  This happens both as a result
		 * of exit() and execve().  The latter case necessitates the call to
		 * flush_tlb_mm() here.
		 */
		flush_tlb_mm(tlb->mm);
	} else if (unlikely(end - start >= 1024*1024*1024*1024UL
			    || REGION_NUMBER(start) != REGION_NUMBER(end - 1))) {
		/*
		 * If we flush more than a tera-byte or across regions, we're probably
		 * better off just flushing the entire TLB(s).  This should be very rare
		 * and is not worth optimizing for.
		 */
		flush_tlb_all();
	} else {
		/*
		 * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
		 * vma pointer.
		 */
		struct vm_area_struct vma;

		vma.vm_mm = tlb->mm;
		/* flush the address range from the tlb: */
		flush_tlb_range(&vma, start, end);
		/* now flush the virt. page-table area mapping the address range: */
		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
	}
}
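
/*
 * Note on the second flush_tlb_range() call above: ia64_thash(va) yields
 * the address of the virtually-mapped linear page-table entry for va, so
 * that second flush removes the translations that map the page-table
 * entries for [start, end) themselves.
 */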
static inline void
ia64_tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	unsigned long i;
	unsigned int nr;

	/* lastly, release the freed pages */
	nr = tlb->nr;

	tlb->nr = 0;
	tlb->start_addr = ~0UL;
	for (i = 0; i < nr; ++i)
		free_page_and_swap_cache(tlb->pages[i]);
}
/*
 * Flush the TLB for address range START to END and release the freed pages
 * that were gathered up to this point.
 */
static inline void
ia64_tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (!tlb->need_flush)
		return;

	ia64_tlb_flush_mmu_tlbonly(tlb, start, end);
	ia64_tlb_flush_mmu_free(tlb);
}
static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(void *);
	}
}
static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
	       unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
	tlb->fullmm = !(start | (end + 1));
	tlb->start = start;
	tlb->end = end;
	tlb->start_addr = ~0UL;
}
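
/*
 * Note: fullmm is set only for a full address-space teardown, i.e. a
 * tlb_gather_mmu(tlb, mm, 0, -1) call, since !(0 | (-1 + 1)) == 1.  Any
 * bounded [start, end) range leaves fullmm == 0.
 */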
/*
 * Called at the end of the shootdown operation to free up any resources that were
 * collected.
 */
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	/*
	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
	 * tlb->end_addr.
	 */
	ia64_tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}
/*
 * Logically, this routine frees PAGE.  On MP machines, the actual freeing of the page
 * must be delayed until after the TLB has been flushed (see comments at the beginning of
 * this file).
 */
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (tlb->nr == tlb->max)
		return true;

	tlb->need_flush = 1;

	if (!tlb->nr && tlb->pages == tlb->local)
		__tlb_alloc_page(tlb);

	tlb->pages[tlb->nr++] = page;
	return false;
}
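
/*
 * Note: a true return from __tlb_remove_page() means the batch was full
 * and PAGE was *not* queued; the caller must flush and then retry, as
 * tlb_remove_page() below does.
 */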
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr);
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu_free(tlb);
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (__tlb_remove_page(tlb, page)) {
		tlb_flush_mmu(tlb);
		__tlb_remove_page(tlb, page);
	}
}

static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
					 struct page *page)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	tlb_remove_page(tlb, page);
}
/*
 * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
 * PTE, not just those pointing to (normal) physical memory.
 */
static inline void
__tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
	if (tlb->start_addr == ~0UL)
		tlb->start_addr = address;
	tlb->end_addr = address + PAGE_SIZE;
}
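
/*
 * Note: start_addr is latched on the first entry after a flush, and
 * end_addr tracks only the most recently removed PTE, so the recorded
 * [start_addr, end_addr) range assumes PTEs are removed in ascending
 * address order, which is how the unmap path walks the page tables.
 */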
#define tlb_migrate_finish(mm)	platform_tlb_migrate_finish(mm)

#define tlb_start_vma(tlb, vma)		do { } while (0)
#define tlb_end_vma(tlb, vma)		do { } while (0)

#define tlb_remove_tlb_entry(tlb, ptep, addr)		\
do {							\
	tlb->need_flush = 1;				\
	__tlb_remove_tlb_entry(tlb, ptep, addr);	\
} while (0)

#define pte_free_tlb(tlb, ptep, address)	\
do {						\
	tlb->need_flush = 1;			\
	__pte_free_tlb(tlb, ptep, address);	\
} while (0)

#define pmd_free_tlb(tlb, pmdp, address)	\
do {						\
	tlb->need_flush = 1;			\
	__pmd_free_tlb(tlb, pmdp, address);	\
} while (0)

#define pud_free_tlb(tlb, pudp, address)	\
do {						\
	tlb->need_flush = 1;			\
	__pud_free_tlb(tlb, pudp, address);	\
} while (0)
#endif /* _ASM_IA64_TLB_H */