percpu_counter.c

/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_MUTEX(percpu_counters_lock);
#endif

/*
 * Add "amount" to the counter.  The update goes to this CPU's local slot and
 * is only folded into the shared fbc->count, under fbc->lock, once the local
 * value reaches +/-FBC_BATCH.
 */
void percpu_counter_mod(struct percpu_counter *fbc, s32 amount)
{
        long count;
        s32 *pcount;
        int cpu = get_cpu();

        pcount = per_cpu_ptr(fbc->counters, cpu);
        count = *pcount + amount;
        if (count >= FBC_BATCH || count <= -FBC_BATCH) {
                spin_lock(&fbc->lock);
                fbc->count += count;
                *pcount = 0;
                spin_unlock(&fbc->lock);
        } else {
                *pcount = count;
        }
        put_cpu();
}
EXPORT_SYMBOL(percpu_counter_mod);
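
/*
 * Usage sketch (illustrative only, not part of this file; the structure and
 * field names are made up): callers embed a percpu_counter in their own
 * object and route fast-path updates through percpu_counter_mod(), which in
 * the common case touches only this CPU's slot.
 *
 *      struct frob_stats {
 *              struct percpu_counter nr_frobs;
 *      } stats;
 *
 *      percpu_counter_mod(&stats.nr_frobs, 1);         increment; usually no
 *                                                      spinlock is taken
 *      percpu_counter_mod(&stats.nr_frobs, -1);        decrement
 */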

/*
 * Add up all the per-cpu counts, return the result. This is a more accurate
 * but much slower version of percpu_counter_read_positive()
 */
s64 percpu_counter_sum(struct percpu_counter *fbc)
{
        s64 ret;
        int cpu;

        spin_lock(&fbc->lock);
        ret = fbc->count;
        for_each_online_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                ret += *pcount;
        }
        spin_unlock(&fbc->lock);
        return ret < 0 ? 0 : ret;
}
EXPORT_SYMBOL(percpu_counter_sum);
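
/*
 * Sketch of the accuracy/cost trade-off (illustrative, not part of this
 * file): percpu_counter_read_positive(), declared in
 * <linux/percpu_counter.h>, does not visit the per-cpu slots, so it can lag
 * the true value by roughly FBC_BATCH per online CPU, while
 * percpu_counter_sum() above takes the lock and walks every online CPU.
 * A hypothetical limit check ("counter" and "limit" are made-up names) might
 * use the cheap read first and fall back to the exact sum near the limit:
 *
 *      if (percpu_counter_read_positive(&counter) < limit)
 *              return 0;                       fast, approximate check
 *      if (percpu_counter_sum(&counter) < limit)
 *              return 0;                       exact recheck
 *      return -ENOSPC;
 */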

void percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
        spin_lock_init(&fbc->lock);
        fbc->count = amount;
        fbc->counters = alloc_percpu(s32);
#ifdef CONFIG_HOTPLUG_CPU
        /* register so the hotplug callback below can find this counter */
        mutex_lock(&percpu_counters_lock);
        list_add(&fbc->list, &percpu_counters);
        mutex_unlock(&percpu_counters_lock);
#endif
}
EXPORT_SYMBOL(percpu_counter_init);

void percpu_counter_destroy(struct percpu_counter *fbc)
{
        free_percpu(fbc->counters);
#ifdef CONFIG_HOTPLUG_CPU
        mutex_lock(&percpu_counters_lock);
        list_del(&fbc->list);
        mutex_unlock(&percpu_counters_lock);
#endif
}
EXPORT_SYMBOL(percpu_counter_destroy);
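
/*
 * Lifecycle sketch (illustrative; names as in the sketch above):
 * percpu_counter_init() and percpu_counter_destroy() must be paired, since
 * destroy frees the per-cpu storage and, under CONFIG_HOTPLUG_CPU, unlinks
 * the counter from the list walked by the hotplug callback below.
 *
 *      percpu_counter_init(&stats.nr_frobs, 0);
 *      ...
 *      percpu_counter_destroy(&stats.nr_frobs);
 */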

#ifdef CONFIG_HOTPLUG_CPU
/*
 * When a CPU is taken offline, fold its local count back into the shared
 * fbc->count for every registered counter so nothing is lost.
 */
static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
                                        unsigned long action, void *hcpu)
{
        unsigned int cpu;
        struct percpu_counter *fbc;

        if (action != CPU_DEAD)
                return NOTIFY_OK;

        cpu = (unsigned long)hcpu;
        mutex_lock(&percpu_counters_lock);
        list_for_each_entry(fbc, &percpu_counters, list) {
                s32 *pcount;

                spin_lock(&fbc->lock);
                pcount = per_cpu_ptr(fbc->counters, cpu);
                fbc->count += *pcount;
                *pcount = 0;
                spin_unlock(&fbc->lock);
        }
        mutex_unlock(&percpu_counters_lock);
        return NOTIFY_OK;
}

static int __init percpu_counter_startup(void)
{
        hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
        return 0;
}
module_init(percpu_counter_startup);
#endif