percpu_counter.h

#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>
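
/*
 * Illustrative usage sketch (not part of the original header); the variable
 * names are hypothetical.  percpu_counter_read() is cheap but approximate,
 * while percpu_counter_sum() folds every CPU's local slot and is exact but
 * slow:
 *
 *	struct percpu_counter free_blocks;
 *	s64 approx, exact;
 *
 *	if (percpu_counter_init(&free_blocks, 0))
 *		return -ENOMEM;
 *	percpu_counter_add(&free_blocks, 128);
 *	approx = percpu_counter_read(&free_blocks);
 *	exact = percpu_counter_sum(&free_blocks);
 *	percpu_counter_destroy(&free_blocks);
 */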

#ifdef CONFIG_SMP

struct percpu_counter {
	raw_spinlock_t lock;
	s64 count;
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;
};

extern int percpu_counter_batch;

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
			  struct lock_class_key *key);
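
/*
 * The macro below gives every percpu_counter_init() call site its own
 * static lock_class_key, so lockdep treats counters initialized at
 * different sites as separate lock classes.
 */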
#define percpu_counter_init(fbc, value)					\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, &__key);		\
	})

void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs);
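
/*
 * percpu_counter_compare() is a cheaper alternative to comparing an exact
 * percpu_counter_sum() against rhs: the implementation can answer from the
 * approximate fbc->count whenever it is farther from rhs than the
 * worst-case per-CPU drift, and only falls back to an exact sum for the
 * close cases.
 */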

static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	__percpu_counter_add(fbc, amount, percpu_counter_batch);
}
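
/*
 * Sketch of the batching scheme (the real logic lives in
 * lib/percpu_counter.c): each CPU accumulates deltas in its local s32 slot
 * and only folds them into fbc->count, under fbc->lock, once the local
 * value reaches the batch threshold.  With batch = 32, for example, a CPU
 * absorbs up to 31 unit increments without touching the shared cacheline,
 * so percpu_counter_read() can lag the true value by roughly
 * (batch - 1) * num_online_cpus().
 */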

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	s64 ret = __percpu_counter_sum(fbc);
	return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * percpu_counter_read() can return a small negative number for a counter
 * that should never be negative, because updates still parked in other
 * CPUs' local slots have not yet been folded into fbc->count.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	s64 ret = fbc->count;

	barrier();		/* Prevent reloads of fbc->count */
	if (ret >= 0)
		return ret;
	return 0;
}
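
/*
 * Worked example of the transient negative (hypothetical values): the true
 * value is 0, but CPU1's fold of -2 has reached fbc->count while CPU0 still
 * holds +2 in its local slot, so percpu_counter_read() reports -2 until
 * CPU0 folds.  percpu_counter_read_positive() clamps that transient to 0.
 */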

static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
	return (fbc->counters != NULL);
}

#else /* !CONFIG_SMP */

struct percpu_counter {
	s64 count;
};

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
	return 0;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	if (fbc->count > rhs)
		return 1;
	else if (fbc->count < rhs)
		return -1;
	else
		return 0;
}

static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}

static inline void
__percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * percpu_counter is intended to track positive numbers.  In the UP case the
 * number should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}

static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
	return 1;
}

#endif	/* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}
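
/*
 * Hypothetical use of percpu_counter_compare(), e.g. a filesystem checking
 * free-block headroom (sbi and nr_wanted are illustrative names, not from
 * this header):
 *
 *	if (percpu_counter_compare(&sbi->free_blocks, nr_wanted) < 0)
 *		return -ENOSPC;
 */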

#endif /* _LINUX_PERCPU_COUNTER_H */