#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4.
 */
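
/*
 * Illustrative usage sketch (not part of this header; the names
 * "my_counter" and "err" are hypothetical).  A counter is set up once,
 * updated cheaply from any CPU, and summed when accuracy matters:
 *
 *      struct percpu_counter my_counter;
 *      int err = percpu_counter_init(&my_counter, 0, GFP_KERNEL);
 *
 *      if (err)
 *              return err;
 *      percpu_counter_add(&my_counter, 16);
 *      percpu_counter_dec(&my_counter);
 *      pr_info("approx=%lld exact=%lld\n",
 *              percpu_counter_read(&my_counter),
 *              percpu_counter_sum(&my_counter));
 *      percpu_counter_destroy(&my_counter);
 */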
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/gfp.h>
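
/*
 * On SMP, writers accumulate updates in a per-cpu s32 delta and fold it
 * into the shared s64 count (under ->lock) only when the delta's
 * magnitude crosses a batch threshold (see lib/percpu_counter.c).  The
 * fast path thus avoids the lock, at the cost of ->count being only
 * approximate.
 */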
#ifdef CONFIG_SMP

struct percpu_counter {
        raw_spinlock_t lock;
        s64 count;
#ifdef CONFIG_HOTPLUG_CPU
        struct list_head list;  /* All percpu_counters are on a list */
#endif
        s32 __percpu *counters;
};

extern int percpu_counter_batch;

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
                          struct lock_class_key *key);
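
/*
 * The init wrapper is a macro so that every call site gets its own
 * static lock_class_key, letting lockdep tell the spinlocks of
 * unrelated counters apart.
 */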
#define percpu_counter_init(fbc, value, gfp)                            \
        ({                                                              \
                static struct lock_class_key __key;                     \
                                                                        \
                __percpu_counter_init(fbc, value, gfp, &__key);         \
        })

void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
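
/*
 * Returns 1, -1 or 0 depending on whether the counter is greater than,
 * less than or equal to rhs.  The SMP implementation may fall back from
 * the cheap approximate value to an exact sum when the two are too
 * close to call.
 */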
int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs);
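
/*
 * percpu_counter_add() uses the global default batch;
 * __percpu_counter_add() lets a caller trade accuracy of ->count for
 * fewer lock acquisitions by passing a larger batch.
 */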
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
        __percpu_counter_add(fbc, amount, percpu_counter_batch);
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
        s64 ret = __percpu_counter_sum(fbc);
        return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
        return __percpu_counter_sum(fbc);
}
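
/*
 * percpu_counter_read() is O(1) but may be off by up to roughly
 * batch * num_online_cpus(); percpu_counter_sum() visits every CPU's
 * delta for an exact (but much more expensive) answer.
 */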
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
        return fbc->count;
}
/*
 * percpu_counter_read() may return a small negative number for a counter
 * that should never be negative, because per-cpu deltas that have not yet
 * been folded into fbc->count can briefly leave it below zero.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
        s64 ret = fbc->count;

        barrier();              /* Prevent reloads of fbc->count */
        if (ret >= 0)
                return ret;
        return 0;
}

static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
        return (fbc->counters != NULL);
}

#else /* !CONFIG_SMP */
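
/*
 * On UP there are no per-cpu deltas to fold: the counter degenerates to
 * a plain s64, and the "approximate" accessors are in fact exact.
 */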
struct percpu_counter {
        s64 count;
};

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
                                      gfp_t gfp)
{
        fbc->count = amount;
        return 0;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
        fbc->count = amount;
}

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
        if (fbc->count > rhs)
                return 1;
        else if (fbc->count < rhs)
                return -1;
        else
                return 0;
}
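
/*
 * preempt_disable() makes the read-modify-write of ->count safe against
 * preemption; with a single CPU there is no other CPU to race with.
 */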
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
        preempt_disable();
        fbc->count += amount;
        preempt_enable();
}

static inline void
__percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
        percpu_counter_add(fbc, amount);
}
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
        return fbc->count;
}

/*
 * percpu_counter is intended to track positive numbers. In the UP case the
 * number should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
        return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
        return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
        return percpu_counter_read(fbc);
}

static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
        return 1;
}

#endif /* CONFIG_SMP */
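
/*
 * Helpers shared by both configurations, built on percpu_counter_add().
 */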
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
        percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
        percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
        percpu_counter_add(fbc, -amount);
}

#endif /* _LINUX_PERCPU_COUNTER_H */