@@ -4279,9 +4279,9 @@ pick_next_task(struct rq *rq)
 }
 
 /*
- * schedule() is the main scheduler function.
+ * __schedule() is the main scheduler function.
  */
-asmlinkage void __sched schedule(void)
+static void __sched __schedule(void)
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
@@ -4322,16 +4322,6 @@ need_resched:
 				if (to_wakeup)
 					try_to_wake_up_local(to_wakeup);
 			}
-
-			/*
-			 * If we are going to sleep and we have plugged IO
-			 * queued, make sure to submit it to avoid deadlocks.
-			 */
-			if (blk_needs_flush_plug(prev)) {
-				raw_spin_unlock(&rq->lock);
-				blk_schedule_flush_plug(prev);
-				raw_spin_lock(&rq->lock);
-			}
 		}
 		switch_count = &prev->nvcsw;
 	}
@@ -4369,6 +4359,26 @@ need_resched:
 	if (need_resched())
 		goto need_resched;
 }
+
+static inline void sched_submit_work(struct task_struct *tsk)
+{
+	if (!tsk->state)
+		return;
+	/*
+	 * If we are going to sleep and we have plugged IO queued,
+	 * make sure to submit it to avoid deadlocks.
+	 */
+	if (blk_needs_flush_plug(tsk))
+		blk_schedule_flush_plug(tsk);
+}
+
+asmlinkage void schedule(void)
+{
+	struct task_struct *tsk = current;
+
+	sched_submit_work(tsk);
+	__schedule();
+}
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -4435,7 +4445,7 @@ asmlinkage void __sched notrace preempt_schedule(void)
 
 	do {
 		add_preempt_count_notrace(PREEMPT_ACTIVE);
-		schedule();
+		__schedule();
 		sub_preempt_count_notrace(PREEMPT_ACTIVE);
 
 		/*
@@ -4463,7 +4473,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
 	do {
 		add_preempt_count(PREEMPT_ACTIVE);
 		local_irq_enable();
-		schedule();
+		__schedule();
 		local_irq_disable();
 		sub_preempt_count(PREEMPT_ACTIVE);
 
@@ -5588,7 +5598,7 @@ static inline int should_resched(void)
 static void __cond_resched(void)
 {
 	add_preempt_count(PREEMPT_ACTIVE);
-	schedule();
+	__schedule();
 	sub_preempt_count(PREEMPT_ACTIVE);
 }
 
@@ -7443,6 +7453,7 @@ static void __sdt_free(const struct cpumask *cpu_map)
 		struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
 		if (sd && (sd->flags & SD_OVERLAP))
 			free_sched_groups(sd->groups, 0);
+		kfree(*per_cpu_ptr(sdd->sd, j));
 		kfree(*per_cpu_ptr(sdd->sg, j));
 		kfree(*per_cpu_ptr(sdd->sgp, j));
 	}
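
For context on the split above: after this change only the voluntary entry point, schedule(), runs sched_submit_work() to flush plugged block I/O before switching, while the preemption paths (preempt_schedule(), preempt_schedule_irq(), __cond_resched()) call __schedule() directly and skip that step. The sketch below mirrors that entry-point split in plain user-space C; it is illustrative only, and every name in it (struct task, submit_deferred_work(), core_schedule(), do_schedule(), do_preempt_schedule()) is a hypothetical stand-in, not a kernel API.

/*
 * Minimal user-space sketch of the pattern introduced by the patch:
 * the public entry point drains deferred work before calling the core
 * scheduling routine, while preemption-style callers invoke the core
 * routine directly. All names are illustrative, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

struct task {
	int  state;          /* 0 == runnable, non-zero == about to sleep */
	bool has_plugged_io; /* deferred block I/O queued on this task */
};

static void core_schedule(void)
{
	/* stands in for __schedule(): pick the next task and switch */
	puts("core_schedule: switching tasks");
}

static void submit_deferred_work(struct task *tsk)
{
	/* stands in for sched_submit_work(): only acts when sleeping */
	if (!tsk->state)
		return;
	if (tsk->has_plugged_io) {
		puts("submitting plugged I/O before sleeping");
		tsk->has_plugged_io = false;
	}
}

/* voluntary entry point, analogous to schedule() */
static void do_schedule(struct task *tsk)
{
	submit_deferred_work(tsk);
	core_schedule();
}

/* preemption-style entry point, analogous to preempt_schedule() */
static void do_preempt_schedule(void)
{
	core_schedule();
}

int main(void)
{
	struct task tsk = { .state = 1, .has_plugged_io = true };

	do_schedule(&tsk);     /* flushes deferred I/O, then switches */
	do_preempt_schedule(); /* switches without touching I/O state */
	return 0;
}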