@@ -489,13 +489,34 @@ void percpu_counter_mod(struct percpu_counter *fbc, long amount)
 	if (count >= FBC_BATCH || count <= -FBC_BATCH) {
 		spin_lock(&fbc->lock);
 		fbc->count += count;
+		*pcount = 0;
 		spin_unlock(&fbc->lock);
-		count = 0;
+	} else {
+		*pcount = count;
 	}
-	*pcount = count;
 	put_cpu();
 }
 EXPORT_SYMBOL(percpu_counter_mod);
+
+/*
+ * Add up all the per-cpu counts, return the result.  This is a more accurate
+ * but much slower version of percpu_counter_read_positive()
+ */
+long percpu_counter_sum(struct percpu_counter *fbc)
+{
+	long ret;
+	int cpu;
+
+	spin_lock(&fbc->lock);
+	ret = fbc->count;
+	for_each_cpu(cpu) {
+		long *pcount = per_cpu_ptr(fbc->counters, cpu);
+		ret += *pcount;
+	}
+	spin_unlock(&fbc->lock);
+	return ret < 0 ? 0 : ret;
+}
+EXPORT_SYMBOL(percpu_counter_sum);
 #endif
 
 /*
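For readers skimming the hunk above, here is a minimal single-threaded user-space model of the batching scheme it implements: per-cpu deltas are folded into the shared count once they reach the batch threshold, and the new sum path walks every per-cpu slot for an exact (clamped-to-zero) total. This is only an illustrative sketch, not the kernel code; NR_CPUS, BATCH, the struct layout and function names here are invented for the example.

/*
 * Build with: cc -std=c99 -Wall model.c
 * Simplified model of the per-cpu counter batching above.
 */
#include <stdio.h>

#define NR_CPUS	4
#define BATCH	32	/* stands in for FBC_BATCH */

struct model_counter {
	long count;		/* models fbc->count */
	long pcount[NR_CPUS];	/* models the per-cpu counters */
};

/* Mirrors the patched percpu_counter_mod() logic */
static void model_mod(struct model_counter *c, int cpu, long amount)
{
	long count = c->pcount[cpu] + amount;

	if (count >= BATCH || count <= -BATCH) {
		c->count += count;	/* fold the batched delta into the shared count */
		c->pcount[cpu] = 0;	/* and reset this cpu's delta */
	} else {
		c->pcount[cpu] = count;
	}
}

/* Accurate but slower: visits every per-cpu slot, like percpu_counter_sum() */
static long model_sum(const struct model_counter *c)
{
	long ret = c->count;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		ret += c->pcount[cpu];
	return ret < 0 ? 0 : ret;
}

int main(void)
{
	struct model_counter c = { 0 };

	for (int i = 0; i < 100; i++)
		model_mod(&c, i % NR_CPUS, 1);

	/* The cheap read lags behind (deltas still per-cpu); the sum is exact: 100 */
	printf("approximate=%ld exact=%ld\n", c.count, model_sum(&c));
	return 0;
}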