percpu_counter.h

#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/gfp.h>

#ifdef CONFIG_SMP

struct percpu_counter {
	raw_spinlock_t lock;
	s64 count;
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;
};
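
/*
 * How this works (explanatory summary, not from the original header): every
 * possible CPU owns a private s32 delta in *counters, and the hot path only
 * touches the local delta.  Once a delta's magnitude crosses the batch
 * threshold, it is folded into the shared s64 count under the lock.  The
 * "count" field is therefore only approximate; summing all per-CPU deltas
 * (see __percpu_counter_sum()) yields the exact value.  The per-CPU array is
 * also what makes the structure so large on machines with many CPUs.
 */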

extern int percpu_counter_batch;

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key);

#define percpu_counter_init(fbc, value, gfp)				\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, gfp, &__key);		\
	})
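
/*
 * Note (explanatory, not from the original header): the macro above plants a
 * distinct static lock_class_key at each call site, so lockdep can treat each
 * counter user's spinlock as its own lock class instead of lumping every
 * percpu_counter in the system together.
 */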

void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}

static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	__percpu_counter_add(fbc, amount, percpu_counter_batch);
}
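
/*
 * Note (explanatory, not from the original header): with the default batch,
 * each CPU may hold up to percpu_counter_batch unfolded updates, so a plain
 * percpu_counter_read() can deviate from the true value by roughly
 * percpu_counter_batch * num_online_cpus().  Callers needing a tighter bound
 * can pass a smaller batch to __percpu_counter_add() or
 * __percpu_counter_compare(), at the cost of taking the lock more often.
 */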

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	s64 ret = __percpu_counter_sum(fbc);

	return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * percpu_counter_read() can return a small negative number for a counter
 * that should never be negative: recent updates may still sit in the
 * per-CPU deltas, so fbc->count alone can briefly dip below zero even
 * though the fully summed value is non-negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	s64 ret = fbc->count;

	barrier();		/* Prevent reloads of fbc->count */
	if (ret >= 0)
		return ret;
	return 0;
}

static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
	return (fbc->counters != NULL);
}

#else /* !CONFIG_SMP */

struct percpu_counter {
	s64 count;
};

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
				      gfp_t gfp)
{
	fbc->count = amount;
	return 0;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	if (fbc->count > rhs)
		return 1;
	else if (fbc->count < rhs)
		return -1;
	else
		return 0;
}

static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	return percpu_counter_compare(fbc, rhs);
}

static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	/*
	 * Disable preemption so the read-modify-write of the (possibly
	 * non-atomic 64-bit) count cannot be torn by a preempting task.
	 */
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}

static inline void
__percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * percpu_counter is intended to track positive numbers.  In the UP case
 * there are no unfolded per-CPU deltas, so the count read here is exact
 * and should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}

static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
	return 1;
}

#endif	/* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}
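
/*
 * Usage sketch (illustrative only, not part of the original header; the
 * counter name and the function are hypothetical):
 *
 *	static struct percpu_counter free_blocks;
 *
 *	static int example_init(void)
 *	{
 *		int err = percpu_counter_init(&free_blocks, 0, GFP_KERNEL);
 *
 *		if (err)
 *			return err;
 *
 *		// Hot path: usually touches only the local per-CPU delta.
 *		percpu_counter_add(&free_blocks, 1024);
 *
 *		// The compare helper falls back to an exact sum when the
 *		// approximate count is too close to the threshold to decide.
 *		if (percpu_counter_compare(&free_blocks, 64) < 0)
 *			pr_warn("free_blocks running low\n");
 *
 *		percpu_counter_destroy(&free_blocks);
 *		return 0;
 *	}
 */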

#endif /* _LINUX_PERCPU_COUNTER_H */