@@ -60,14 +60,15 @@ static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
 
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 {
 	int cpu;
+	unsigned long flags;
 
-	raw_spin_lock(&fbc->lock);
+	raw_spin_lock_irqsave(&fbc->lock, flags);
 	for_each_possible_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		*pcount = 0;
 	}
 	fbc->count = amount;
-	raw_spin_unlock(&fbc->lock);
+	raw_spin_unlock_irqrestore(&fbc->lock, flags);
 }
 EXPORT_SYMBOL(percpu_counter_set);
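The hunk above closes a self-deadlock window: if an interrupt fires on the CPU that holds fbc->lock and the handler touches the same counter, the handler spins on a lock its own CPU can never release. A minimal sketch of the pattern the irqsave variant makes safe (the counter, handler, and function names are hypothetical, not part of the patch):

#include <linux/interrupt.h>
#include <linux/percpu_counter.h>

static struct percpu_counter nr_events;	/* hypothetical counter */

/* IRQ context: may interrupt a CPU inside percpu_counter_set() */
static irqreturn_t demo_isr(int irq, void *dev_id)
{
	percpu_counter_inc(&nr_events);	/* takes fbc->lock on batch overflow */
	return IRQ_HANDLED;
}

/* Process context: resets the counter under fbc->lock */
static void demo_reset(void)
{
	percpu_counter_set(&nr_events, 0);
}

With plain raw_spin_lock(), demo_isr() interrupting demo_reset() inside the critical section on the same CPU would spin forever; raw_spin_lock_irqsave() keeps local interrupts off while the lock is held.
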
@@ -78,9 +79,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 	preempt_disable();
 	count = __this_cpu_read(*fbc->counters) + amount;
 	if (count >= batch || count <= -batch) {
-		raw_spin_lock(&fbc->lock);
+		unsigned long flags;
+		raw_spin_lock_irqsave(&fbc->lock, flags);
 		fbc->count += count;
-		raw_spin_unlock(&fbc->lock);
+		raw_spin_unlock_irqrestore(&fbc->lock, flags);
 		__this_cpu_write(*fbc->counters, 0);
 	} else {
 		__this_cpu_write(*fbc->counters, count);
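Here the lock, and with it the new irq-off window, is confined to the slow path that folds a per-CPU delta into fbc->count once it reaches the batch; flags is deliberately declared inside that block, so the lockless fast path is untouched. A sketch of typical usage under that contract (the names, the batch of 32, and the 1024 slack are illustrative assumptions, not from the patch):

#include <linux/percpu_counter.h>

static struct percpu_counter nr_dirty;	/* hypothetical counter */

static void dirty_one_page(void)
{
	/* Fast path while the per-CPU delta stays below the batch:
	 * one per-CPU write with preemption disabled, no lock. */
	__percpu_counter_add(&nr_dirty, 1, 32);
}

static bool over_limit(s64 limit)
{
	/* The cheap read may lag the true value by up to
	 * nr_cpus * (batch - 1); 1024 is slack covering that. */
	if (percpu_counter_read(&nr_dirty) < limit - 1024)
		return false;
	/* Near the limit, pay for the exact, locked sum. */
	return percpu_counter_sum(&nr_dirty) >= limit;
}
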
@@ -97,14 +99,15 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
 {
 	s64 ret;
 	int cpu;
+	unsigned long flags;
 
-	raw_spin_lock(&fbc->lock);
+	raw_spin_lock_irqsave(&fbc->lock, flags);
 	ret = fbc->count;
 	for_each_online_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
 	}
-	raw_spin_unlock(&fbc->lock);
+	raw_spin_unlock_irqrestore(&fbc->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(__percpu_counter_sum);
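
For reference, __percpu_counter_sum() as it reads with the hunk applied. The exact sum now walks every online CPU with local interrupts disabled, so the irq-off window grows with the CPU count; that cost is one reason callers generally prefer the approximate percpu_counter_read() on hot paths:

s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;
	unsigned long flags;

	/* Interrupts stay off for the whole per-CPU walk. */
	raw_spin_lock_irqsave(&fbc->lock, flags);
	ret = fbc->count;
	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
	return ret;
}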