|
@@ -1011,6 +1011,23 @@ migrate:
|
|
|
return migrate_task_to(p, env.best_cpu);
|
|
|
}
|
|
|
|
|
|
+/* Attempt to migrate a task to a CPU on the preferred node. */
|
|
|
+static void numa_migrate_preferred(struct task_struct *p)
|
|
|
+{
|
|
|
+ /* Clear any pending retry; nothing to do if already on the preferred node */
|
|
|
+ p->numa_migrate_retry = 0;
|
|
|
+ if (cpu_to_node(task_cpu(p)) == p->numa_preferred_nid)
|
|
|
+ return;
|
|
|
+
|
|
|
+ /* No preferred node yet: the task has no NUMA fault statistics */
|
|
|
+ if (unlikely(p->numa_preferred_nid == -1))
|
|
|
+ return;
|
|
|
+
|
|
|
+ /* Otherwise, try to migrate to a CPU on the preferred node */
|
|
|
+ if (task_numa_migrate(p) != 0)
|
|
|
+ p->numa_migrate_retry = jiffies + HZ*5;
|
|
|
+}
|
|
|
+
|
|
|
static void task_numa_placement(struct task_struct *p)
|
|
|
{
|
|
|
int seq, nid, max_nid = -1;
|
|
@@ -1045,17 +1062,12 @@ static void task_numa_placement(struct task_struct *p)
|
|
|
}
|
|
|
}
|
|
|
|
|
|
- /*
|
|
|
- * Record the preferred node as the node with the most faults,
|
|
|
- * requeue the task to be running on the idlest CPU on the
|
|
|
- * preferred node and reset the scanning rate to recheck
|
|
|
- * the working set placement.
|
|
|
- */
|
|
|
+ /* Record the preferred node as the node with the most faults */
|
|
|
if (max_faults && max_nid != p->numa_preferred_nid) {
|
|
|
/* Update the preferred nid and migrate task if possible */
|
|
|
p->numa_preferred_nid = max_nid;
|
|
|
p->numa_migrate_seq = 1;
|
|
|
- task_numa_migrate(p);
|
|
|
+ numa_migrate_preferred(p);
|
|
|
}
|
|
|
}
|
|
|
|
|
@@ -1111,6 +1123,10 @@ void task_numa_fault(int last_nidpid, int node, int pages, bool migrated)
|
|
|
|
|
|
task_numa_placement(p);
|
|
|
|
|
|
+ /* Retry migration to the preferred node if it previously failed */
|
|
|
+ if (p->numa_migrate_retry && time_after(jiffies, p->numa_migrate_retry))
|
|
|
+ numa_migrate_preferred(p);
|
|
|
+
|
|
|
p->numa_faults_buffer[task_faults_idx(node, priv)] += pages;
|
|
|
}
|
|
|
|