@@ -962,7 +962,7 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 	return next;
 }
 
-static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
+static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 
 static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
 {
@@ -982,7 +982,7 @@ static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
 static int find_lowest_rq(struct task_struct *task)
 {
 	struct sched_domain *sd;
-	cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
+	cpumask_t *lowest_mask = __get_cpu_var(local_cpu_mask);
 	int this_cpu = smp_processor_id();
 	int cpu      = task_cpu(task);
 
@@ -1551,3 +1551,12 @@ static void print_rt_stats(struct seq_file *m, int cpu)
 	rcu_read_unlock();
 }
 #endif /* CONFIG_SCHED_DEBUG */
+
+/* Note that this is never called for !SMP, but that's OK. */
+static inline void init_sched_rt_class(void)
+{
+	unsigned int i;
+
+	for_each_possible_cpu(i)
+		alloc_cpumask_var(&per_cpu(local_cpu_mask, i), GFP_KERNEL);
+}
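
For reference, a minimal sketch of the cpumask_var_t conventions this patch relies on. It is not part of the patch: the demo_mask/demo_* names are made up for illustration, while DEFINE_PER_CPU, alloc_cpumask_var, per_cpu, __get_cpu_var, cpumask_copy and cpu_online_mask are the stock per-CPU/cpumask API of this kernel series. With CONFIG_CPUMASK_OFFSTACK=y a cpumask_var_t is a struct cpumask pointer that must be allocated before use; with it disabled it is a one-element array, so the same source compiles either way. That is why the '&' is dropped when reading the per-CPU value, and why init_sched_rt_class() has to allocate each per-CPU mask up front.

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/errno.h>

/* Illustrative only; name is made up. */
static DEFINE_PER_CPU(cpumask_var_t, demo_mask);

/* Allocate one mask per possible CPU; can only fail with offstack masks. */
static int demo_alloc_masks(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		if (!alloc_cpumask_var(&per_cpu(demo_mask, cpu), GFP_KERNEL))
			return -ENOMEM;
	return 0;
}

static void demo_use_mask(void)
{
	/* The per-CPU value already behaves as a pointer: no '&' needed. */
	cpumask_t *mask = __get_cpu_var(demo_mask);

	cpumask_copy(mask, cpu_online_mask);
}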