@@ -190,7 +190,7 @@ prop_adjust_shift(int *pl_shift, unsigned long *pl_period, int new_shift)
 
 int prop_local_init_percpu(struct prop_local_percpu *pl)
 {
-	spin_lock_init(&pl->lock);
+	raw_spin_lock_init(&pl->lock);
 	pl->shift = 0;
 	pl->period = 0;
 	return percpu_counter_init(&pl->events, 0);
@@ -226,7 +226,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl)
 	if (pl->period == global_period)
 		return;
 
-	spin_lock_irqsave(&pl->lock, flags);
+	raw_spin_lock_irqsave(&pl->lock, flags);
 	prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
 
 	/*
@@ -247,7 +247,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl)
 	percpu_counter_set(&pl->events, 0);
 
 	pl->period = global_period;
-	spin_unlock_irqrestore(&pl->lock, flags);
+	raw_spin_unlock_irqrestore(&pl->lock, flags);
 }
 
 /*
@@ -324,7 +324,7 @@ void prop_fraction_percpu(struct prop_descriptor *pd,
 
 int prop_local_init_single(struct prop_local_single *pl)
 {
-	spin_lock_init(&pl->lock);
+	raw_spin_lock_init(&pl->lock);
 	pl->shift = 0;
 	pl->period = 0;
 	pl->events = 0;
@@ -356,7 +356,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl)
 	if (pl->period == global_period)
 		return;
 
-	spin_lock_irqsave(&pl->lock, flags);
+	raw_spin_lock_irqsave(&pl->lock, flags);
 	prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
 	/*
 	 * For each missed period, we half the local counter.
@@ -367,7 +367,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl)
 	else
 		pl->events = 0;
 	pl->period = global_period;
-	spin_unlock_irqrestore(&pl->lock, flags);
+	raw_spin_unlock_irqrestore(&pl->lock, flags);
 }
 
 /*