vmstat.h

#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;
#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented, and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */
struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__this_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__this_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
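
/*
 * Usage sketch (illustrative, not part of this header).  PGFAULT is
 * one of the vm_event_item values from <linux/vm_event_item.h>:
 *
 *	count_vm_event(PGFAULT);	- safe in any context
 *
 * The double-underscore variants map to the non-preemption-safe
 * __this_cpu ops and may only be used when the caller already runs
 * with preemption (or interrupts) disabled, e.g.:
 *
 *	preempt_disable();
 *	__count_vm_events(PGFAULT, 1);
 *	preempt_enable();
 */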

extern void all_vm_events(unsigned long *);

#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
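
/*
 * How the macro above works (illustrative): the per-zone event items
 * are laid out in zone order, so for a call such as
 *
 *	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 *
 * "PGALLOC_NORMAL - ZONE_NORMAL + zone_idx(zone)" selects the item
 * matching the zone (PGALLOC_DMA, PGALLOC_NORMAL, ...).  This relies
 * on the PGALLOC_* items in vm_event_item.h mirroring the zone index
 * order.
 */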

/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}
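
/*
 * Note that every update is applied twice, so that both the per-zone
 * and the global view stay cheap to read.  E.g. (illustrative)
 * retiring one page from a zone's free list amounts to:
 *
 *	zone_page_state_add(-1, zone, NR_FREE_PAGES);
 */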

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
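
/*
 * Reader's sketch (illustrative): counters are in units of pages, so
 * free memory in bytes would be
 *
 *	unsigned long free = global_page_state(NR_FREE_PAGES) << PAGE_SHIFT;
 *
 * On SMP the stored value can transiently go negative because per-cpu
 * deltas have not been folded back yet, hence the clamp to zero above.
 */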

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization, so the result is still not
 * exactly accurate.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
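
/*
 * Example (illustrative): a caller that must not act on a stale count
 * under memory pressure can pay for the for_each_online_cpu() walk to
 * get a tighter value, e.g.:
 *
 *	if (zone_page_state_snapshot(zone, NR_ISOLATED_ANON) > limit)
 *		...back off...
 */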

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}
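
/*
 * E.g. (illustrative) the file cache resident on the local node:
 *
 *	unsigned long pages = node_page_state(numa_node_id(), NR_FILE_PAGES);
 *
 * which simply sums the counter over every zone backing that node.
 */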

extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
void refresh_zone_stat_thresholds(void);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
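
/*
 * Threshold sketch (illustrative): each cpu batches updates in its
 * vm_stat_diff[] array and only folds them into the atomic counters
 * once they exceed a per-zone threshold, so a counter read can be off
 * by roughly threshold * num_online_cpus().  Under memory pressure a
 * caller such as kswapd can tighten the thresholds around a sleep:
 *
 *	set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
 *	...sleep...
 *	set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
 */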

#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no
 * need to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_zone_stat_thresholds(void) { }

#endif /* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
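
/*
 * E.g. (illustrative) returning a buddy block of 2^order pages to the
 * free lists, crediting NR_FREE_CMA_PAGES as well when the block is
 * CMA-backed:
 *
 *	__mod_zone_freepage_state(zone, 1 << order,
 *				  get_pageblock_migratetype(page));
 */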

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */