@@ -860,6 +860,24 @@ void task_numa_work(struct callback_head *work)
 	if (p->flags & PF_EXITING)
 		return;
 
+	/*
+	 * We do not care about task placement until a task runs on a node
+	 * other than the first one used by the address space. This is
+	 * largely because migrations are driven by what CPU the task
+	 * is running on. If it's never scheduled on another node, it'll
+	 * not migrate so why bother trapping the fault.
+	 */
+	if (mm->first_nid == NUMA_PTE_SCAN_INIT)
+		mm->first_nid = numa_node_id();
+	if (mm->first_nid != NUMA_PTE_SCAN_ACTIVE) {
+		/* Are we running on a new node yet? */
+		if (numa_node_id() == mm->first_nid &&
+		    !sched_feat_numa(NUMA_FORCE))
+			return;
+
+		mm->first_nid = NUMA_PTE_SCAN_ACTIVE;
+	}
+
 	/*
 	 * Reset the scan period if enough time has gone by. Objective is that
 	 * scanning will be reduced if pages are properly placed. As tasks
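
To make the gate's control flow easier to follow, below is a minimal user-space sketch of the three-state machine the hunk introduces (INIT -> first node recorded -> ACTIVE). The struct, constants, and numa_scan_allowed() helper are illustrative stand-ins, not kernel code, and the NUMA_FORCE scheduler-feature override is omitted for brevity:

#include <stdio.h>

#define NUMA_PTE_SCAN_INIT	-1
#define NUMA_PTE_SCAN_ACTIVE	-2

/* Mock of the mm_struct field the patch adds. */
struct mm_mock {
	int first_nid;
};

/*
 * Returns 1 if PTE scanning should proceed, 0 if it is still gated.
 * cur_node stands in for numa_node_id(), the node the task is
 * currently running on.
 */
static int numa_scan_allowed(struct mm_mock *mm, int cur_node)
{
	/* Record the first node the address space runs on. */
	if (mm->first_nid == NUMA_PTE_SCAN_INIT)
		mm->first_nid = cur_node;

	if (mm->first_nid != NUMA_PTE_SCAN_ACTIVE) {
		/* Still on the first node: no point trapping faults. */
		if (cur_node == mm->first_nid)
			return 0;

		/* Task has run on a second node; scan from now on. */
		mm->first_nid = NUMA_PTE_SCAN_ACTIVE;
	}

	return 1;
}

int main(void)
{
	struct mm_mock mm = { .first_nid = NUMA_PTE_SCAN_INIT };

	printf("%d\n", numa_scan_allowed(&mm, 0)); /* 0: first node recorded */
	printf("%d\n", numa_scan_allowed(&mm, 0)); /* 0: still on node 0 */
	printf("%d\n", numa_scan_allowed(&mm, 1)); /* 1: new node, gate opens */
	printf("%d\n", numa_scan_allowed(&mm, 0)); /* 1: gate stays open */
	return 0;
}

Note the one-way latch: once first_nid is set to NUMA_PTE_SCAN_ACTIVE the early return is never taken again, so scanning is only ever suppressed before the task's first migration off its initial node.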