/*
 * include/asm-xtensa/mmu_context.h
 *
 * Switch an MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */
  12. #ifndef _XTENSA_MMU_CONTEXT_H
  13. #define _XTENSA_MMU_CONTEXT_H
  14. #ifndef CONFIG_MMU
  15. #include <asm/nommu_context.h>
  16. #else
  17. #include <linux/stringify.h>
  18. #include <linux/sched.h>
  19. #include <variant/core.h>
  20. #include <asm/pgtable.h>
  21. #include <asm/cacheflush.h>
  22. #include <asm/tlbflush.h>
  23. #include <asm-generic/mm_hooks.h>
  24. #if (XCHAL_HAVE_TLBS != 1)
  25. # error "Linux must have an MMU!"
  26. #endif
  27. extern unsigned long asid_cache;
  28. /*
  29. * NO_CONTEXT is the invalid ASID value that we don't ever assign to
  30. * any user or kernel context.
  31. *
  32. * 0 invalid
  33. * 1 kernel
  34. * 2 reserved
  35. * 3 reserved
  36. * 4...255 available
  37. */
  38. #define NO_CONTEXT 0
  39. #define ASID_USER_FIRST 4
  40. #define ASID_MASK ((1 << XCHAL_MMU_ASID_BITS) - 1)
  41. #define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8))
/*
 * Write @val into the RASID special register (the ring->ASID map),
 * then isync so the new mapping is in effect for subsequent accesses.
 */
static inline void set_rasid_register (unsigned long val)
{
	__asm__ __volatile__ (" wsr %0, "__stringify(RASID)"\n\t"
			      " isync\n" : : "a" (val));
}
/* Read the current value of the RASID special register. */
static inline unsigned long get_rasid_register (void)
{
	unsigned long tmp;
	__asm__ __volatile__ (" rsr %0,"__stringify(RASID)"\n\t" : "=a" (tmp));
	return tmp;
}
/*
 * Hand @mm a fresh ASID from the global asid_cache counter.
 *
 * The bits above ASID_MASK act as a generation number.  When the low
 * bits wrap to zero, previously issued ASIDs may be reused, so the
 * whole TLB must be flushed; we then skip past the reserved values
 * 0..3 by adding ASID_USER_FIRST.
 */
static inline void
__get_new_mmu_context(struct mm_struct *mm)
{
	extern void flush_tlb_all(void);
	if (! (++asid_cache & ASID_MASK) ) {
		flush_tlb_all(); /* start new asid cycle */
		asid_cache += ASID_USER_FIRST;
	}
	mm->context = asid_cache;
}
/*
 * Make @mm's context current in hardware: program RASID with the mm's
 * ASID and invalidate the cached page directory so stale translations
 * are not used.  Caller must have assigned mm->context first.
 */
static inline void
__load_mmu_context(struct mm_struct *mm)
{
	set_rasid_register(ASID_INSERT(mm->context));
	invalidate_page_directory();
}
  69. /*
  70. * Initialize the context related info for a new mm_struct
  71. * instance.
  72. */
  73. static inline int
  74. init_new_context(struct task_struct *tsk, struct mm_struct *mm)
  75. {
  76. mm->context = NO_CONTEXT;
  77. return 0;
  78. }
/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 * Unlike switch_mm(), this always allocates a brand-new ASID for
 * @next before loading it into hardware.
 */
static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	/* Unconditionally get a new ASID. */
	__get_new_mmu_context(next);
	__load_mmu_context(next);
}
/*
 * Switch this CPU to @next's address space.  @next's ASID is reused
 * if it was issued in the current generation of asid_cache; if it is
 * NO_CONTEXT or its generation bits (above ASID_MASK) differ from the
 * current asid_cache generation, it is stale and a new ASID is
 * allocated first.  @prev and @tsk are unused here.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long asid = asid_cache;

	/* Check if our ASID is of an older version and thus invalid */

	if (next->context == NO_CONTEXT || ((next->context^asid) & ~ASID_MASK))
		__get_new_mmu_context(next);

	__load_mmu_context(next);
}
/* No work is needed when an mm is deactivated on this architecture. */
#define deactivate_mm(tsk, mm) do { } while(0)
/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 * NOTE(review): only the cached page directory is invalidated here;
 * presumably the mm's ASID simply expires via the generation scheme.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	invalidate_page_directory();
}
/* Lazy-TLB hook called when a kernel thread borrows @mm: no-op here. */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/* Nothing to do. */
}
#endif /* CONFIG_MMU */
#endif /* _XTENSA_MMU_CONTEXT_H */