/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
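
/* For reference, struct tlb_batch is declared in
 * arch/sparc/include/asm/tlbflush_64.h; at the time of this file it
 * looked roughly like the following (field order from memory, so
 * treat this as a sketch rather than the authoritative layout):
 *
 *	struct tlb_batch {
 *		struct mm_struct *mm;	// mm the queued addresses belong to
 *		unsigned long tlb_nr;	// number of entries in vaddrs[]
 *		unsigned long active;	// nonzero inside lazy MMU mode
 *		unsigned long vaddrs[TLB_BATCH_NR];
 *	};
 */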

void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (!tb->tlb_nr)
		goto out;

	/* Flush the TSB entries before the TLB entries. */
	flush_tsb_user(tb);

	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			/* Single page: use the cheaper per-page flush. */
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}

void arch_enter_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = &__get_cpu_var(tlb_batch);

	tb->active = 1;
}

void arch_leave_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = &__get_cpu_var(tlb_batch);

	/* Push out anything queued while the batch was active. */
	if (tb->tlb_nr)
		flush_tlb_pending();
	tb->active = 0;
}
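
/* Illustrative only: roughly how the generic VM drives the lazy MMU
 * hooks above (cf. zap_pte_range() in mm/memory.c; simplified and
 * paraphrased, not the exact kernel code):
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (each pte in the range) {
 *		ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
 *		...	// the sparc64 PTE-update paths funnel each old
 *			// PTE into tlb_batch_add() while tb->active is set
 *	}
 *	arch_leave_lazy_mmu_mode();	// flushes anything still batched
 */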

void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	/* The low bit of the flush address records whether the mapping
	 * was executable, so the flush code knows to hit the I-TLB too.
	 */
	if (pte_exec(orig))
		vaddr |= 0x1UL;

	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		/* The D-cache on pre-sun4v chips is virtually indexed,
		 * so a dirty page must be flushed when its kernel and
		 * user mappings land on different cache colors, i.e.
		 * differ in bit 13, just above the 8K page offset.
		 */
		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:

	/* The whole address space is being torn down; it gets flushed
	 * in one shot elsewhere, so don't batch individual pages.
	 */
	if (fullmm) {
		put_cpu_var(tlb_batch);
		return;
	}

	nr = tb->tlb_nr;

	/* The batch can only hold addresses from one mm at a time. */
	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	/* Not inside lazy MMU mode: flush this page synchronously
	 * instead of queueing it.
	 */
	if (!tb->active) {
		flush_tsb_user_page(mm, vaddr);
		global_flush_tlb_page(mm, vaddr);
		goto out;
	}

	if (nr == 0)
		tb->mm = mm;

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}
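
/* A minimal sketch of how PTE updates reach tlb_batch_add() in the
 * first place -- paraphrased from __set_pte_at() in
 * arch/sparc/include/asm/pgtable_64.h of the same era.  The guard
 * condition and names here are from memory and may differ in detail.
 */
static inline void __set_pte_at_sketch(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep, pte_t pte, int fullmm)
{
	pte_t orig = *ptep;

	*ptep = pte;

	/* Kernel mappings and PTEs that held no valid translation
	 * need no TLB/TSB maintenance.
	 */
	if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID))
		tlb_batch_add(mm, addr, ptep, orig, fullmm);
}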