mmu_context.h

#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>

#ifndef CONFIG_PARAVIRT
#include <asm-generic/mm_hooks.h>

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct *entries;
	int size;
};
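/*
 * Illustrative sketch, not part of this header: because an ldt_struct is
 * never modified while live, a writer (modify_ldt()) builds a complete
 * replacement and swaps the pointer instead of editing in place.  This is
 * condensed from the write_ldt() logic in arch/x86/kernel/ldt.c of the
 * same era; the helpers named here (alloc_ldt_struct, install_ldt,
 * free_ldt_struct) live in that file, not in this header.
 */
#if 0
	old_ldt = mm->context.ldt;
	new_ldt = alloc_ldt_struct(new_size);		/* fresh, private copy */
	if (!new_ldt)
		return -ENOMEM;
	if (old_ldt)
		memcpy(new_ldt->entries, old_ldt->entries,
		       old_ldt->size * LDT_ENTRY_SIZE);
	new_ldt->entries[ldt_info.entry_number] = ldt;	/* the one changed slot */
	install_ldt(mm, new_ldt);	/* publish + IPI; see load_mm_ldt() */
	free_ldt_struct(old_ldt);	/* safe: no CPU can still see it */
#endif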
static inline void load_mm_ldt(struct mm_struct *mm)
{
	struct ldt_struct *ldt;

	/* smp_read_barrier_depends synchronizes with barrier in install_ldt */
	ldt = ACCESS_ONCE(mm->context.ldt);
	smp_read_barrier_depends();

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active.  The LDT will not be freed until
	 * after the IPI is handled by all such CPUs.  This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */
	if (unlikely(ldt))
		set_ldt(ldt->entries, ldt->size);
	else
		clear_LDT();

	DEBUG_LOCKS_WARN_ON(preemptible());
}
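/*
 * Illustrative sketch, not part of this header: the publisher side that the
 * barrier comment above pairs with.  This is roughly what install_ldt() in
 * arch/x86/kernel/ldt.c does in this kernel generation; the exact barrier
 * flavor used there may differ, but the shape is: finish initializing the
 * new ldt_struct, order that before publishing the pointer, then IPI every
 * CPU running the mm so the old LDT can be freed.
 */
#if 0
static void install_ldt(struct mm_struct *current_mm, struct ldt_struct *ldt)
{
	/* Order initialization before publication; pairs with the read side
	 * (ACCESS_ONCE + smp_read_barrier_depends) in load_mm_ldt(). */
	smp_wmb();
	ACCESS_ONCE(current_mm->context.ldt) = ldt;

	/* Force an LDT reload on every CPU using this mm before freeing
	 * the old descriptor table. */
	on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
}
#endif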
/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);

/*
 * Called when a kernel thread is scheduled in on top of @mm: mark this
 * CPU's TLB state lazy, so the first flush IPI can detach us from the mm
 * (leave_mm) and later flushes can skip this CPU entirely.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}
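/*
 * For reference, not part of this header: the per-CPU state flipped above,
 * as declared in asm/tlbflush.h in kernels of this generation.
 */
#if 0
struct tlb_state {
	struct mm_struct *active_mm;
	int state;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

#define TLBSTATE_OK	1	/* mm live on this CPU; flush IPIs required */
#define TLBSTATE_LAZY	2	/* kernel thread borrowing mm; may detach on IPI */
#endif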
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
#ifdef CONFIG_SMP
		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		percpu_write(cpu_tlbstate.active_mm, next);
#endif
		cpumask_set_cpu(cpu, mm_cpumask(next));

		/* Re-load page tables */
		load_cr3(next->pgd);

		/* stop flush ipis for the previous mm */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));

		/*
		 * load the LDT, if the LDT is different:
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_mm_ldt(next);
	}
#ifdef CONFIG_SMP
	else {
		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);

		if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
			/* We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload CR3
			 * to make sure to use no freed page tables.
			 */
			load_cr3(next->pgd);
			load_mm_ldt(next);
		}
	}
#endif
}
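/*
 * Illustrative sketch, not part of this header: the leave_mm() that the
 * lazy-TLB branch above refers to, roughly as it appears in
 * arch/x86/mm/tlb.c of the same era.  A lazy CPU that takes a flush IPI
 * detaches itself from the mm, which is why switch_mm() must re-set the
 * cpumask bit and reload CR3 when it finds the bit cleared.
 */
#if 0
void leave_mm(int cpu)
{
	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	cpumask_clear_cpu(cpu,
			  mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
	load_cr3(swapper_pg_dir);	/* stop using the mm's page tables */
}
#endif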
#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0);
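/*
 * Caller context, for illustration only: activate_mm() runs on the exec
 * path, condensed here from fs/exec.c:exec_mmap() in kernels of this
 * generation.  Passing tsk == NULL to switch_mm() is fine here because,
 * as the definition above shows, x86's switch_mm() never touches tsk.
 */
#if 0
	task_lock(tsk);
	active_mm = tsk->active_mm;
	tsk->mm = mm;
	tsk->active_mm = mm;
	activate_mm(active_mm, mm);
	task_unlock(tsk);
#endif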
/*
 * Reset the user fs/gs selectors so that descriptors from the old mm's
 * LDT cannot still be referenced once that mm is gone.
 */
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

#endif /* _ASM_X86_MMU_CONTEXT_H */