mmu_context.h

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#ifndef _ASM_TILE_MMU_CONTEXT_H
#define _ASM_TILE_MMU_CONTEXT_H

#include <linux/smp.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>
#include <asm-generic/mm_hooks.h>

/* No per-mm context state needs to be set up, so creation always succeeds. */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	return 0;
}

/* Note that arch/tile/kernel/head.S also calls hv_install_context() */
static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot)
{
	/* FIXME: DIRECTIO should not always be set. */
	int rc = hv_install_context(__pa(pgdir), prot, asid, HV_CTX_DIRECTIO);
	if (rc < 0)
		panic("hv_install_context failed: %d", rc);
}

static inline void install_page_table(pgd_t *pgdir, int asid)
{
	/*
	 * Look up the kernel PTE that maps the pgd page itself, so the
	 * hypervisor accesses the page table with the same caching
	 * ("home") attributes as the kernel's own mapping of it.
	 */
	pte_t *ptep = virt_to_pte(NULL, (unsigned long)pgdir);
	__install_page_table(pgdir, asid, *ptep);
}

/*
 * "Lazy" TLB mode is entered when we are switching to a kernel task,
 * which borrows the mm of the previous task.  The goal of this
 * optimization is to avoid having to install a new page table.  On
 * early x86 machines (where the concept originated) you couldn't do
 * anything short of a full page table install for invalidation, so
 * handling a remote TLB invalidate required doing a page table
 * re-install.  Someone clearly decided that it was silly to keep
 * doing this while in "lazy" TLB mode, so the optimization involves
 * installing the swapper page table instead the first time a
 * shootdown interrupt arrives, and clearing the cpu out of
 * cpu_vm_mask, so the cpu running the kernel task doesn't need to
 * take any more interrupts.  At that point it's necessary to
 * explicitly reinstall the original mm's page table when context
 * switching back to it.
 *
 * On Tile, we have to do a page-table install whenever DMA is enabled,
 * so in that case lazy mode doesn't help anyway.  And more generally,
 * we have efficient per-page TLB shootdown, and don't expect to spend
 * that much time in kernel tasks in general, so just leaving the
 * kernel task borrowing the old page table, but handling TLB
 * shootdowns, is a reasonable thing to do.  And importantly, this
 * lets us use the hypervisor's internal APIs for TLB shootdown, which
 * means we don't have to worry about having TLB shootdowns blocked
 * when Linux is disabling interrupts; see the page migration code for
 * an example of where it's important for TLB shootdowns to complete
 * even when interrupts are disabled at the Linux level.
 */
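
/*
 * Illustrative sketch (an assumption about the classic x86-style
 * scheme described above, not part of this file): a lazy cpu that
 * receives a flush interrupt could respond roughly like this, after
 * which no further shootdowns are directed at it until it switches
 * back to a real mm.  The helper name is hypothetical.
 */
#if 0
static inline void example_lazy_tlb_shootdown(struct mm_struct *mm)
{
	/* Install the kernel's own ("swapper") page table... */
	install_page_table(swapper_pg_dir, __get_cpu_var(current_asid));

	/* ...and leave the mm's cpumask so no more IPIs are sent here. */
	cpumask_clear_cpu(smp_processor_id(), mm_cpumask(mm));
}
#endif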

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *t)
{
#if CHIP_HAS_TILE_DMA()
	/*
	 * We have to do an "identity" page table switch in order to
	 * clear any pending DMA interrupts.
	 */
	if (current->thread.tile_dma_state.enabled)
		install_page_table(mm->pgd, __get_cpu_var(current_asid));
#endif
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	if (likely(prev != next)) {
		int cpu = smp_processor_id();

		/*
		 * Pick a new ASID; if we wrap past max_asid, flush
		 * the local TLB first, since stale translations may
		 * still be tagged with the recycled ASIDs.
		 */
		int asid = __get_cpu_var(current_asid) + 1;
		if (asid > max_asid) {
			asid = min_asid;
			local_flush_tlb();
		}
		__get_cpu_var(current_asid) = asid;

		/* Clear cpu from the old mm, and set it in the new one. */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));
		cpumask_set_cpu(cpu, mm_cpumask(next));

		/* Re-load page tables */
		install_page_table(next->pgd, asid);

		/* See how we should set the red/black cache info */
		check_mm_caching(prev, next);

		/*
		 * Since we're changing to a new mm, we have to flush
		 * the icache in case some physical page now being mapped
		 * has subsequently been repurposed and has new code.
		 */
		__flush_icache();
	}
}
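
/*
 * Worked example (illustrative values, not from this file): if
 * min_asid were 1 and max_asid 255, a cpu starting at ASID 1 would
 * hand out ASIDs 2, 3, ..., 255 on successive mm switches; the
 * switch after that wraps back to 1, paying for one
 * local_flush_tlb() so that stale translations tagged with the
 * recycled ASIDs can't be hit.
 */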

static inline void activate_mm(struct mm_struct *prev_mm,
			       struct mm_struct *next_mm)
{
	switch_mm(prev_mm, next_mm, NULL);
}
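
/*
 * Illustrative caller (a simplified sketch of exec_mmap() in
 * fs/exec.c, not part of this file): activate_mm() is how exec
 * switches the current task onto its freshly built mm.  The
 * function name below is hypothetical.
 */
#if 0
static int example_exec_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk = current;
	struct mm_struct *old_active_mm = tsk->active_mm;

	task_lock(tsk);
	tsk->mm = mm;
	tsk->active_mm = mm;
	activate_mm(old_active_mm, mm);	/* lands in switch_mm() above */
	task_unlock(tsk);
	return 0;
}
#endif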

#define destroy_context(mm)		do { } while (0)
#define deactivate_mm(tsk, mm)		do { } while (0)

#endif /* _ASM_TILE_MMU_CONTEXT_H */