@@ -73,9 +73,10 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
 
 	for (idx = 0; idx < task_pri; idx++) {
 		struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
+		int skip = 0;
 
 		if (!atomic_read(&(vec)->count))
-			continue;
+			skip = 1;
 		/*
 		 * When looking at the vector, we need to read the counter,
 		 * do a memory barrier, then read the mask.
@@ -96,6 +97,10 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
 		 */
 		smp_rmb();
 
+		/* Need to do the rmb for every iteration */
+		if (skip)
+			continue;
+
 		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
 			continue;
 
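To see what the reader side above is doing, here is a minimal userspace C11 sketch
of the same ordering. It is a model, not the kernel code: vec_count, vec_mask and
find_side() are invented names for one vector, and the acquire fence only
approximates smp_rmb().

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_int vec_count;	/* models vec->count */
	static atomic_int vec_mask;	/* models vec->mask, one bit per CPU */

	static bool find_side(int cpu_bit)
	{
		/* Models: if (!atomic_read(&(vec)->count)) skip = 1; */
		int skip = (atomic_load_explicit(&vec_count,
						 memory_order_relaxed) == 0);

		/*
		 * Models smp_rmb(): keeps the count read ordered before
		 * the mask read. The patch moves the early "continue"
		 * below this point so the barrier runs on every loop
		 * iteration, even for vectors that end up skipped.
		 */
		atomic_thread_fence(memory_order_acquire);

		if (skip)
			return false;

		/* Models cpumask_any_and(): mask read only after the fence. */
		return (atomic_load_explicit(&vec_mask,
					     memory_order_relaxed) >> cpu_bit) & 1;
	}

The change is visible in the control flow: the skip test sits below the fence, so
the fence executes on every iteration of the vector scan instead of being jumped
over whenever a count reads zero.
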
@@ -134,6 +139,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 {
 	int                 *currpri = &cp->cpu_to_pri[cpu];
 	int                  oldpri  = *currpri;
+	int                  do_mb = 0;
 
 	newpri = convert_prio(newpri);
 
@@ -158,18 +164,34 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 		 * do a write memory barrier, and then update the count, to
 		 * make sure the vector is visible when count is set.
 		 */
-		smp_wmb();
+		smp_mb__before_atomic_inc();
 		atomic_inc(&(vec)->count);
+		do_mb = 1;
 	}
 	if (likely(oldpri != CPUPRI_INVALID)) {
 		struct cpupri_vec *vec  = &cp->pri_to_cpu[oldpri];
 
+		/*
+		 * Because the order of modification of the vec->count
+		 * is important, we must make sure that the update
+		 * of the new prio is seen before we decrement the
+		 * old prio. This makes sure that the loop sees
+		 * one or the other when we raise the priority of
+		 * the run queue. We don't care about when we lower the
+		 * priority, as that will trigger an rt pull anyway.
+		 *
+		 * We only need to do a memory barrier if we updated
+		 * the new priority vec.
+		 */
+		if (do_mb)
+			smp_mb__after_atomic_inc();
+
 		/*
 		 * When removing from the vector, we decrement the counter first
 		 * do a memory barrier and then clear the mask.
 		 */
 		atomic_dec(&(vec)->count);
-		smp_wmb();
+		smp_mb__after_atomic_inc();
 		cpumask_clear_cpu(cpu, vec->mask);
 	}
 
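The writer side pairs with the reader sketch above. As a rough userspace C11
model, with seq_cst fences standing in for smp_mb__before_atomic_inc() and
smp_mb__after_atomic_inc(), and with new_vec_*, old_vec_* and set_side()
invented for the sketch:

	#include <stdatomic.h>

	static atomic_int new_vec_count, new_vec_mask;
	static atomic_int old_vec_count, old_vec_mask;

	static void set_side(int cpu_bit)
	{
		int do_mb = 0;

		/* Publish the new vector: mask first ... */
		atomic_fetch_or_explicit(&new_vec_mask, 1 << cpu_bit,
					 memory_order_relaxed);
		/* ... fence, as smp_mb__before_atomic_inc() ... */
		atomic_thread_fence(memory_order_seq_cst);
		/* ... then the count, so the mask is visible once count is. */
		atomic_fetch_add_explicit(&new_vec_count, 1,
					  memory_order_relaxed);
		do_mb = 1;

		/*
		 * As smp_mb__after_atomic_inc(): the new count must be
		 * seen before the old count drops, so a concurrent
		 * find-side scan sees the CPU in at least one vector.
		 * In the kernel this is conditional because the new
		 * vector is only touched when newpri is valid.
		 */
		if (do_mb)
			atomic_thread_fence(memory_order_seq_cst);

		/* Retract the old vector: count first, fence, then mask. */
		atomic_fetch_sub_explicit(&old_vec_count, 1,
					  memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst);
		atomic_fetch_and_explicit(&old_vec_mask, ~(1 << cpu_bit),
					  memory_order_relaxed);
	}

Raising the new count before dropping the old one means a scan from the highest
priority downward finds the CPU in one vector or the other, never in neither;
as the patch comment notes, lowering a priority does not need the guarantee,
since that triggers an rt pull anyway.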