/* arch/x86/include/asm/cacheflush.h */
#ifndef _ASM_X86_CACHEFLUSH_H
#define _ASM_X86_CACHEFLUSH_H

/* Caches aren't brain-dead on the intel. */
#include <asm-generic/cacheflush.h>
#include <asm/special_insns.h>

#ifdef CONFIG_X86_PAT
/*
 * X86 PAT uses the page flags WC and Uncached together to keep track of
 * the memory type of pages that have a backing page struct. X86 PAT
 * supports 3 different memory types:
 *   - _PAGE_CACHE_WB
 *   - _PAGE_CACHE_WC
 *   - _PAGE_CACHE_UC_MINUS
 * and a fourth state where the page's memory type has not been changed
 * from its default (a value of -1 is used to denote this).
 * Note we do not support _PAGE_CACHE_UC here.
 */
#define _PGMT_DEFAULT		0
#define _PGMT_WC		(1UL << PG_arch_1)
#define _PGMT_UC_MINUS		(1UL << PG_uncached)
#define _PGMT_WB		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_MASK		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_CLEAR_MASK	(~_PGMT_MASK)
  21. static inline unsigned long get_page_memtype(struct page *pg)
  22. {
  23. unsigned long pg_flags = pg->flags & _PGMT_MASK;
  24. if (pg_flags == _PGMT_DEFAULT)
  25. return -1;
  26. else if (pg_flags == _PGMT_WC)
  27. return _PAGE_CACHE_WC;
  28. else if (pg_flags == _PGMT_UC_MINUS)
  29. return _PAGE_CACHE_UC_MINUS;
  30. else
  31. return _PAGE_CACHE_WB;
  32. }
  33. static inline void set_page_memtype(struct page *pg, unsigned long memtype)
  34. {
  35. unsigned long memtype_flags = _PGMT_DEFAULT;
  36. unsigned long old_flags;
  37. unsigned long new_flags;
  38. switch (memtype) {
  39. case _PAGE_CACHE_WC:
  40. memtype_flags = _PGMT_WC;
  41. break;
  42. case _PAGE_CACHE_UC_MINUS:
  43. memtype_flags = _PGMT_UC_MINUS;
  44. break;
  45. case _PAGE_CACHE_WB:
  46. memtype_flags = _PGMT_WB;
  47. break;
  48. }
  49. do {
  50. old_flags = pg->flags;
  51. new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
  52. } while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
  53. }
#else
/* Without PAT, per-page memory types are not tracked: reads always report
 * "not set" (-1) and writes are no-ops. */
static inline unsigned long get_page_memtype(struct page *pg) { return -1; }
static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
#endif
/*
 * The set_memory_* API can be used to change various attributes of a virtual
 * address range. The attributes include:
 * Cacheability  : UnCached, WriteCombining, WriteBack
 * Executability : eXecutable, NoteXecutable
 * Read/Write    : ReadOnly, ReadWrite
 * Presence      : NotPresent
 *
 * Within a category, the attributes are mutually exclusive.
 *
 * The implementation of this API will take care of various aspects that
 * are associated with changing such attributes, such as:
 * - Flushing TLBs
 * - Flushing CPU caches
 * - Making sure aliases of the memory behind the mapping don't violate
 *   coherency rules as defined by the CPU in the system.
 *
 * What this API does not do:
 * - Provide exclusion between various callers - including callers that
 *   operate on other mappings of the same physical page
 * - Restore default attributes when a page is freed
 * - Guarantee that mappings other than the requested one are
 *   in any state, other than that these do not violate rules for
 *   the CPU you have. Do not depend on any effects on other mappings,
 *   CPUs other than the one you have may have more relaxed rules.
 * The caller is required to take care of these.
 */

int _set_memory_uc(unsigned long addr, int numpages);
int _set_memory_wc(unsigned long addr, int numpages);
int _set_memory_wb(unsigned long addr, int numpages);
int set_memory_uc(unsigned long addr, int numpages);
int set_memory_wc(unsigned long addr, int numpages);
int set_memory_wb(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);

int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wc(unsigned long *addr, int addrinarray);
int set_memory_array_wb(unsigned long *addr, int addrinarray);

int set_pages_array_uc(struct page **pages, int addrinarray);
int set_pages_array_wc(struct page **pages, int addrinarray);
int set_pages_array_wb(struct page **pages, int addrinarray);
/*
 * For legacy compatibility with the old APIs, a few functions
 * are provided that work on a "struct page".
 * These functions operate ONLY on the 1:1 kernel mapping of the
 * memory that the struct page represents, and internally just
 * call the set_memory_* function. See the description of the
 * set_memory_* function for more details on conventions.
 *
 * These APIs should be considered *deprecated* and are likely going to
 * be removed in the future.
 * The reason for this is the implicit operation on the 1:1 mapping only,
 * making this not a generally useful API.
 *
 * Specifically, many users of the old APIs had a virtual address,
 * called virt_to_page() or vmalloc_to_page() on that address to
 * get a struct page* that the old API required.
 * To convert these cases, use set_memory_*() on the original
 * virtual address, do not use these functions.
 */
int set_pages_uc(struct page *page, int numpages);
int set_pages_wb(struct page *page, int numpages);
int set_pages_x(struct page *page, int numpages);
int set_pages_nx(struct page *page, int numpages);
int set_pages_ro(struct page *page, int numpages);
int set_pages_rw(struct page *page, int numpages);

/* Flush the cache lines covering [addr, addr + size) via clflush. */
void clflush_cache_range(void *addr, unsigned int size);
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
extern const int rodata_test_data;
extern int kernel_set_to_readonly;
void set_kernel_text_rw(void);
void set_kernel_text_ro(void);
#else
/* Without CONFIG_DEBUG_RODATA, kernel text is never write-protected,
 * so toggling its protection is a no-op. */
static inline void set_kernel_text_rw(void) { }
static inline void set_kernel_text_ro(void) { }
#endif
#ifdef CONFIG_DEBUG_RODATA_TEST
int rodata_test(void);
#else
/* Test compiled out: report success (0) unconditionally. */
static inline int rodata_test(void)
{
	return 0;
}
#endif

#endif /* _ASM_X86_CACHEFLUSH_H */