@@ -922,6 +922,13 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 		rq->skip_clock_update = 1;
 }
 
+static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);
+
+void register_task_migration_notifier(struct notifier_block *n)
+{
+	atomic_notifier_chain_register(&task_migration_notifier, n);
+}
+
 #ifdef CONFIG_SMP
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
@@ -952,8 +959,16 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 	trace_sched_migrate_task(p, new_cpu);
 
 	if (task_cpu(p) != new_cpu) {
+		struct task_migration_notifier tmn;
+
 		p->se.nr_migrations++;
 		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
+
+		tmn.task = p;
+		tmn.from_cpu = task_cpu(p);
+		tmn.to_cpu = new_cpu;
+
+		atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
 	}
 
 	__set_task_cpu(p, new_cpu);
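
For reference, a minimal sketch of how a subscriber might hook into the chain added above. It assumes the struct task_migration_notifier definition (task/from_cpu/to_cpu fields, with the CPU fields taken as int) and the register_task_migration_notifier() prototype are exposed via <linux/sched.h> elsewhere in this patch; the init function name and pr_debug message are illustrative only, not part of the patch.

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/sched.h>

/* Called atomically from set_task_cpu(); keep the handler short. */
static int migration_event(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	struct task_migration_notifier *tmn = data;

	pr_debug("task %d migrating: cpu %d -> cpu %d\n",
		 tmn->task->pid, tmn->from_cpu, tmn->to_cpu);
	return NOTIFY_OK;
}

static struct notifier_block migration_nb = {
	.notifier_call = migration_event,
};

static int __init migration_watch_init(void)
{
	/*
	 * The patch adds no unregister helper, so register once from an
	 * initcall rather than from unloadable module code.
	 */
	register_task_migration_notifier(&migration_nb);
	return 0;
}
late_initcall(migration_watch_init);

Since the chain is an atomic notifier and atomic_notifier_call_chain() runs with the runqueue lock held in set_task_cpu(), a callback must not sleep or take sleeping locks; anything heavier should be deferred to process context.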