page_ref.h

#ifndef _LINUX_PAGE_REF_H
#define _LINUX_PAGE_REF_H

#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/tracepoint-defs.h>
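
/*
 * Tracepoints for the page refcount events below. They are declared by
 * hand (rather than through the usual tracepoint headers) so that the
 * inline helpers in this file can cheaply test whether each tracepoint
 * is enabled.
 */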
extern struct tracepoint __tracepoint_page_ref_set;
extern struct tracepoint __tracepoint_page_ref_mod;
extern struct tracepoint __tracepoint_page_ref_mod_and_test;
extern struct tracepoint __tracepoint_page_ref_mod_and_return;
extern struct tracepoint __tracepoint_page_ref_mod_unless;
extern struct tracepoint __tracepoint_page_ref_freeze;
extern struct tracepoint __tracepoint_page_ref_unfreeze;

#ifdef CONFIG_DEBUG_PAGE_REF

/*
 * Ideally we would want to use the trace_<tracepoint>_enabled() helper
 * functions. But due to include header file issues, that is not
 * feasible. Instead we have to open code the static key functions.
 *
 * See trace_##name##_enabled(void) in include/linux/tracepoint.h
 */
#define page_ref_tracepoint_active(t) static_key_false(&(t).key)

extern void __page_ref_set(struct page *page, int v);
extern void __page_ref_mod(struct page *page, int v);
extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
extern void __page_ref_mod_unless(struct page *page, int v, int u);
extern void __page_ref_freeze(struct page *page, int v, int ret);
extern void __page_ref_unfreeze(struct page *page, int v);

#else

#define page_ref_tracepoint_active(t) false
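
/*
 * With CONFIG_DEBUG_PAGE_REF disabled, page_ref_tracepoint_active() is
 * constant-false and the stubs below are empty, so all of the tracepoint
 * hooks in the helpers compile away to nothing.
 */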
static inline void __page_ref_set(struct page *page, int v)
{
}

static inline void __page_ref_mod(struct page *page, int v)
{
}

static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
{
}

static inline void __page_ref_mod_and_return(struct page *page, int v, int ret)
{
}

static inline void __page_ref_mod_unless(struct page *page, int v, int u)
{
}

static inline void __page_ref_freeze(struct page *page, int v, int ret)
{
}

static inline void __page_ref_unfreeze(struct page *page, int v)
{
}

#endif

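/*
 * page_ref_count() reads the raw _refcount of this exact page, while
 * page_count() reads the _refcount of the compound head, which is where
 * the reference count of a compound page lives. For order-0 pages the
 * two are equivalent.
 */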
static inline int page_ref_count(struct page *page)
{
	return atomic_read(&page->_refcount);
}

static inline int page_count(struct page *page)
{
	return atomic_read(&compound_head(page)->_refcount);
}
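
/*
 * set_page_count() unconditionally overwrites _refcount; it is an
 * initialization helper (see init_page_count() below), not a way to
 * take or drop references.
 */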
static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_refcount, v);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_set))
		__page_ref_set(page, v);
}

/*
 * Setup the page count before being freed into the page allocator for
 * the first time (boot or memory hotplug)
 */
static inline void init_page_count(struct page *page)
{
	set_page_count(page, 1);
}
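
/*
 * The helpers below wrap the matching atomic_*() operation on
 * page->_refcount and fire the page_ref_mod tracepoint when it is
 * enabled.
 */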
static inline void page_ref_add(struct page *page, int nr)
{
	atomic_add(nr, &page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, nr);
}

static inline void page_ref_sub(struct page *page, int nr)
{
	atomic_sub(nr, &page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, -nr);
}

static inline void page_ref_inc(struct page *page)
{
	atomic_inc(&page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, 1);
}

static inline void page_ref_dec(struct page *page)
{
	atomic_dec(&page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, -1);
}
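
/*
 * As above, but these variants also report the outcome of the atomic
 * operation: the *_and_test() variants return true when _refcount
 * reached zero, the *_return() variants return the new refcount value.
 */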
static inline int page_ref_sub_and_test(struct page *page, int nr)
{
	int ret = atomic_sub_and_test(nr, &page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -nr, ret);
	return ret;
}

static inline int page_ref_inc_return(struct page *page)
{
	int ret = atomic_inc_return(&page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
		__page_ref_mod_and_return(page, 1, ret);
	return ret;
}

static inline int page_ref_dec_and_test(struct page *page)
{
	int ret = atomic_dec_and_test(&page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -1, ret);
	return ret;
}

static inline int page_ref_dec_return(struct page *page)
{
	int ret = atomic_dec_return(&page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
		__page_ref_mod_and_return(page, -1, ret);
	return ret;
}
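
/*
 * page_ref_add_unless() adds @nr only when _refcount is not @u and
 * returns non-zero iff the addition happened. With @u == 0 it is the
 * building block of get_page_unless_zero(): take a reference only if
 * the page still has one.
 */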
static inline int page_ref_add_unless(struct page *page, int nr, int u)
{
	int ret = atomic_add_unless(&page->_refcount, nr, u);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless))
		__page_ref_mod_unless(page, nr, ret);
	return ret;
}
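
/*
 * page_ref_freeze() atomically replaces an expected refcount of @count
 * with 0 via cmpxchg, so that refcount-based users (e.g. a concurrent
 * get_page_unless_zero()) can no longer take a reference while the
 * caller works on the page; page_ref_unfreeze() then publishes the
 * final count. A sketch of the typical pattern, with illustrative
 * names:
 *
 *	if (page_ref_freeze(page, expected)) {
 *		... page is invisible to refcount-based lookups here ...
 *		page_ref_unfreeze(page, new_count);
 *	}
 */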
static inline int page_ref_freeze(struct page *page, int count)
{
	int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze))
		__page_ref_freeze(page, count, ret);
	return ret;
}

static inline void page_ref_unfreeze(struct page *page, int count)
{
	VM_BUG_ON_PAGE(page_count(page) != 0, page);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_refcount, count);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze))
		__page_ref_unfreeze(page, count);
}

#endif /* _LINUX_PAGE_REF_H */