@@ -123,6 +123,19 @@ static inline struct cpuset *task_cs(struct task_struct *task)
 			    struct cpuset, css);
 }
 
+#ifdef CONFIG_NUMA
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+	return task->mempolicy;
+}
+#else
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+	return false;
+}
+#endif
+
+
 /* bits in struct cpuset flags field */
 typedef enum {
 	CS_CPU_EXCLUSIVE,
@@ -949,7 +962,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 static void cpuset_change_task_nodemask(struct task_struct *tsk,
 					nodemask_t *newmems)
 {
-	bool masks_disjoint = !nodes_intersects(*newmems, tsk->mems_allowed);
+	bool need_loop;
 
 repeat:
 	/*
@@ -962,6 +975,14 @@ repeat:
 		return;
 
 	task_lock(tsk);
+	/*
+	 * Determine if a loop is necessary if another thread is doing
+	 * get_mems_allowed().  If at least one node remains unchanged and
+	 * tsk does not have a mempolicy, then an empty nodemask will not be
+	 * possible when mems_allowed is larger than a word.
+	 */
+	need_loop = task_has_mempolicy(tsk) ||
+			!nodes_intersects(*newmems, tsk->mems_allowed);
 	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
 	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
 
@@ -981,11 +1002,9 @@ repeat:
 
 	/*
 	 * Allocation of memory is very fast, we needn't sleep when waiting
-	 * for the read-side.  No wait is necessary, however, if at least one
-	 * node remains unchanged.
+	 * for the read-side.
 	 */
-	while (masks_disjoint &&
-			ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
+	while (need_loop && ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
 		task_unlock(tsk);
 		if (!task_curr(tsk))
 			yield();
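
Not part of the patch: a minimal userspace sketch of the condition computed into need_loop above, with nodemask_t simplified to a single unsigned long and the mempolicy test to a plain bool; all names below are illustrative, only need_loop mirrors the new local variable. The rebind grows mems_allowed with nodes_or() and later narrows it to the new mask, so a concurrent get_mems_allowed() reader can observe the old mask, the OR of both, or the new mask (or, for masks wider than a word, a per-word mix); when at least one node is in both masks and no mempolicy has to be rebound, every such value still contains a valid node and the writer need not spin.

#include <stdbool.h>
#include <stdio.h>

/* Mirrors: task_has_mempolicy(tsk) || !nodes_intersects(*newmems, tsk->mems_allowed) */
static bool need_loop(bool has_mempolicy, unsigned long oldmask, unsigned long newmask)
{
	return has_mempolicy || !(oldmask & newmask);
}

int main(void)
{
	unsigned long oldmask = 0x3;		  /* nodes 0-1 */
	unsigned long newmask = 0x6;		  /* nodes 1-2: node 1 unchanged */
	unsigned long grown = oldmask | newmask;  /* after nodes_or() */

	printf("need_loop=%d, reader may see %#lx, %#lx or %#lx\n",
	       need_loop(false, oldmask, newmask), oldmask, grown, newmask);
	return 0;
}

With these example masks need_loop is 0 and every value the reader can see keeps node 1 set; with disjoint masks (or with a mempolicy to rebind) need_loop is 1 and the writer falls into the waiting loop above.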