/* percpu_counter.c — fast batching per-cpu counters (Linux kernel, lib/) */
  1. /*
  2. * Fast batching percpu counters.
  3. */
  4. #include <linux/percpu_counter.h>
  5. #include <linux/notifier.h>
  6. #include <linux/mutex.h>
  7. #include <linux/init.h>
  8. #include <linux/cpu.h>
  9. #include <linux/module.h>
  10. #ifdef CONFIG_HOTPLUG_CPU
  11. static LIST_HEAD(percpu_counters);
  12. static DEFINE_MUTEX(percpu_counters_lock);
  13. #endif
  14. void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
  15. {
  16. int cpu;
  17. spin_lock(&fbc->lock);
  18. for_each_possible_cpu(cpu) {
  19. s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
  20. *pcount = 0;
  21. }
  22. fbc->count = amount;
  23. spin_unlock(&fbc->lock);
  24. }
  25. EXPORT_SYMBOL(percpu_counter_set);
  26. void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
  27. {
  28. s64 count;
  29. s32 *pcount;
  30. int cpu = get_cpu();
  31. pcount = per_cpu_ptr(fbc->counters, cpu);
  32. count = *pcount + amount;
  33. if (count >= batch || count <= -batch) {
  34. spin_lock(&fbc->lock);
  35. fbc->count += count;
  36. *pcount = 0;
  37. spin_unlock(&fbc->lock);
  38. } else {
  39. *pcount = count;
  40. }
  41. put_cpu();
  42. }
  43. EXPORT_SYMBOL(__percpu_counter_add);
  44. /*
  45. * Add up all the per-cpu counts, return the result. This is a more accurate
  46. * but much slower version of percpu_counter_read_positive()
  47. */
  48. s64 __percpu_counter_sum(struct percpu_counter *fbc)
  49. {
  50. s64 ret;
  51. int cpu;
  52. spin_lock(&fbc->lock);
  53. ret = fbc->count;
  54. for_each_online_cpu(cpu) {
  55. s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
  56. ret += *pcount;
  57. }
  58. spin_unlock(&fbc->lock);
  59. return ret;
  60. }
  61. EXPORT_SYMBOL(__percpu_counter_sum);
  62. void percpu_counter_init(struct percpu_counter *fbc, s64 amount)
  63. {
  64. spin_lock_init(&fbc->lock);
  65. fbc->count = amount;
  66. fbc->counters = alloc_percpu(s32);
  67. #ifdef CONFIG_HOTPLUG_CPU
  68. mutex_lock(&percpu_counters_lock);
  69. list_add(&fbc->list, &percpu_counters);
  70. mutex_unlock(&percpu_counters_lock);
  71. #endif
  72. }
  73. EXPORT_SYMBOL(percpu_counter_init);
  74. void percpu_counter_destroy(struct percpu_counter *fbc)
  75. {
  76. free_percpu(fbc->counters);
  77. #ifdef CONFIG_HOTPLUG_CPU
  78. mutex_lock(&percpu_counters_lock);
  79. list_del(&fbc->list);
  80. mutex_unlock(&percpu_counters_lock);
  81. #endif
  82. }
  83. EXPORT_SYMBOL(percpu_counter_destroy);
  84. #ifdef CONFIG_HOTPLUG_CPU
  85. static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
  86. unsigned long action, void *hcpu)
  87. {
  88. unsigned int cpu;
  89. struct percpu_counter *fbc;
  90. if (action != CPU_DEAD)
  91. return NOTIFY_OK;
  92. cpu = (unsigned long)hcpu;
  93. mutex_lock(&percpu_counters_lock);
  94. list_for_each_entry(fbc, &percpu_counters, list) {
  95. s32 *pcount;
  96. spin_lock(&fbc->lock);
  97. pcount = per_cpu_ptr(fbc->counters, cpu);
  98. fbc->count += *pcount;
  99. *pcount = 0;
  100. spin_unlock(&fbc->lock);
  101. }
  102. mutex_unlock(&percpu_counters_lock);
  103. return NOTIFY_OK;
  104. }
  105. static int __init percpu_counter_startup(void)
  106. {
  107. hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
  108. return 0;
  109. }
  110. module_init(percpu_counter_startup);
  111. #endif