@@ -949,6 +949,8 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 static void cpuset_change_task_nodemask(struct task_struct *tsk,
 					nodemask_t *newmems)
 {
+	bool masks_disjoint = !nodes_intersects(*newmems, tsk->mems_allowed);
+
 repeat:
 	/*
 	 * Allow tasks that have access to memory reserves because they have
@@ -963,7 +965,6 @@ repeat:
 	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
 	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
 
-
 	/*
 	 * ensure checking ->mems_allowed_change_disable after setting all new
 	 * allowed nodes.
@@ -980,9 +981,11 @@ repeat:
 
 	/*
 	 * Allocation of memory is very fast, we needn't sleep when waiting
-	 * for the read-side.
+	 * for the read-side. No wait is necessary, however, if at least one
+	 * node remains unchanged.
 	 */
-	while (ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
+	while (masks_disjoint &&
+	       ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
 		task_unlock(tsk);
 		if (!task_curr(tsk))
 			yield();
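
To illustrate the intent of the new masks_disjoint check outside of kernel context, here is a minimal userspace sketch (an illustration only, not part of the patch): plain unsigned long bitmasks stand in for nodemask_t, masks_intersect() stands in for nodes_intersects(), and a printf marks where the kernel would actually spin/yield on mems_allowed_change_disable.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for nodes_intersects(): true if the masks share a node. */
static bool masks_intersect(unsigned long a, unsigned long b)
{
	return (a & b) != 0;
}

static void change_nodemask(unsigned long *mems_allowed, unsigned long newmems)
{
	bool masks_disjoint = !masks_intersect(newmems, *mems_allowed);

	/* Step 1: grow the mask so the old nodes stay usable meanwhile. */
	*mems_allowed |= newmems;

	if (masks_disjoint) {
		/*
		 * Only in this case would the kernel spin/yield until
		 * concurrent readers of mems_allowed finish; when at least
		 * one node survives the change, a reader can never observe
		 * an empty mask, so no wait is needed.
		 */
		printf("masks disjoint: would wait for readers here\n");
	}

	/* Step 2: shrink to exactly the new mask. */
	*mems_allowed = newmems;
}

int main(void)
{
	unsigned long mems = 0x3;	/* nodes 0 and 1 allowed */

	change_nodemask(&mems, 0x2);	/* overlap on node 1: no wait */
	change_nodemask(&mems, 0x4);	/* disjoint: would wait */
	return 0;
}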