percpu_counter.h (2.5 KB)
  1. #ifndef _LINUX_PERCPU_COUNTER_H
  2. #define _LINUX_PERCPU_COUNTER_H
  3. /*
  4. * A simple "approximate counter" for use in ext2 and ext3 superblocks.
  5. *
  6. * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4.
  7. */
  8. #include <linux/spinlock.h>
  9. #include <linux/smp.h>
  10. #include <linux/list.h>
  11. #include <linux/threads.h>
  12. #include <linux/percpu.h>
  13. #include <linux/types.h>
  14. #ifdef CONFIG_SMP
  15. struct percpu_counter {
  16. spinlock_t lock;
  17. s64 count;
  18. #ifdef CONFIG_HOTPLUG_CPU
  19. struct list_head list; /* All percpu_counters are on a list */
  20. #endif
  21. s32 *counters;
  22. };
  23. #if NR_CPUS >= 16
  24. #define FBC_BATCH (NR_CPUS*2)
  25. #else
  26. #define FBC_BATCH (NR_CPUS*4)
  27. #endif
  28. void percpu_counter_init(struct percpu_counter *fbc, s64 amount);
  29. void percpu_counter_destroy(struct percpu_counter *fbc);
  30. void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
  31. s64 percpu_counter_sum(struct percpu_counter *fbc);
  32. static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
  33. {
  34. __percpu_counter_add(fbc, amount, FBC_BATCH);
  35. }
  36. static inline s64 percpu_counter_read(struct percpu_counter *fbc)
  37. {
  38. return fbc->count;
  39. }
  40. /*
  41. * It is possible for the percpu_counter_read() to return a small negative
  42. * number for some counter which should never be negative.
  43. *
  44. */
  45. static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
  46. {
  47. s64 ret = fbc->count;
  48. barrier(); /* Prevent reloads of fbc->count */
  49. if (ret >= 0)
  50. return ret;
  51. return 1;
  52. }
  53. #else
  54. struct percpu_counter {
  55. s64 count;
  56. };
  57. static inline void percpu_counter_init(struct percpu_counter *fbc, s64 amount)
  58. {
  59. fbc->count = amount;
  60. }
  61. static inline void percpu_counter_destroy(struct percpu_counter *fbc)
  62. {
  63. }
  64. #define __percpu_counter_add(fbc, amount, batch) \
  65. percpu_counter_add(fbc, amount)
  66. static inline void
  67. percpu_counter_add(struct percpu_counter *fbc, s64 amount)
  68. {
  69. preempt_disable();
  70. fbc->count += amount;
  71. preempt_enable();
  72. }
  73. static inline s64 percpu_counter_read(struct percpu_counter *fbc)
  74. {
  75. return fbc->count;
  76. }
  77. static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
  78. {
  79. return fbc->count;
  80. }
  81. static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
  82. {
  83. return percpu_counter_read_positive(fbc);
  84. }
  85. #endif /* CONFIG_SMP */
  86. static inline void percpu_counter_inc(struct percpu_counter *fbc)
  87. {
  88. percpu_counter_add(fbc, 1);
  89. }
  90. static inline void percpu_counter_dec(struct percpu_counter *fbc)
  91. {
  92. percpu_counter_add(fbc, -1);
  93. }
  94. static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
  95. {
  96. percpu_counter_add(fbc, -amount);
  97. }
  98. #endif /* _LINUX_PERCPU_COUNTER_H */