@@ -1270,7 +1270,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 	int dest_cpu;
 
 	/* Look for allowed, online CPU in same node. */
-	for_each_cpu_mask(dest_cpu, *nodemask) {
+	for_each_cpu(dest_cpu, nodemask) {
 		if (!cpu_online(dest_cpu))
 			continue;
 		if (!cpu_active(dest_cpu))
@@ -1281,7 +1281,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 
 	for (;;) {
 		/* Any allowed, online CPU? */
-		for_each_cpu_mask(dest_cpu, *tsk_cpus_allowed(p)) {
+		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
 			if (!cpu_online(dest_cpu))
 				continue;
 			if (!cpu_active(dest_cpu))
@@ -1964,6 +1964,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	local_irq_enable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
 	finish_lock_switch(rq, prev);
+	finish_arch_post_lock_switch();
 
 	fire_sched_in_preempt_notifiers(current);
 	if (mm)
@@ -3101,8 +3102,6 @@ EXPORT_SYMBOL(sub_preempt_count);
  */
 static noinline void __schedule_bug(struct task_struct *prev)
 {
-	struct pt_regs *regs = get_irq_regs();
-
 	if (oops_in_progress)
 		return;
 
@@ -3113,11 +3112,7 @@ static noinline void __schedule_bug(struct task_struct *prev)
 	print_modules();
 	if (irqs_disabled())
 		print_irqtrace_events(prev);
-
-	if (regs)
-		show_regs(regs);
-	else
-		dump_stack();
+	dump_stack();
 }
 
 /*
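A note on the select_fallback_rq() hunks: for_each_cpu() takes a struct cpumask * directly, whereas the older for_each_cpu_mask() took a cpumask_t by value, which is why the old code had to dereference nodemask and tsk_cpus_allowed(p). A minimal sketch of the pointer-based iterator, assuming a kernel-module context (the module name and pr_info() messages are illustrative, not part of the patch):

#include <linux/module.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/cpumask.h>

/* Walk the online-CPU mask with the pointer-based iterator;
 * cpu_online_mask is the kernel's canonical struct cpumask *
 * of currently online CPUs. */
static int __init cpumask_demo_init(void)
{
	int cpu;

	for_each_cpu(cpu, cpu_online_mask)	/* mask passed as a pointer */
		pr_info("cpumask_demo: CPU %d is online\n", cpu);

	return 0;
}

static void __exit cpumask_demo_exit(void)
{
}

module_init(cpumask_demo_init);
module_exit(cpumask_demo_exit);
MODULE_LICENSE("GPL");

Similarly, the finish_arch_post_lock_switch() call added in finish_task_switch() is a hook that lets an architecture run code once finish_lock_switch() has dropped the runqueue lock; architectures that don't define it are expected to get an empty default along the lines of "#define finish_arch_post_lock_switch() do { } while (0)", so the common case compiles to nothing.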