@@ -2083,7 +2083,7 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
  */
 
 /* lookup first element intersecting start-end */
-/* Caller holds sp->lock */
+/* Caller holds sp->mutex */
 static struct sp_node *
 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
 {
@@ -2147,13 +2147,13 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
 
 	if (!sp->root.rb_node)
 		return NULL;
-	spin_lock(&sp->lock);
+	mutex_lock(&sp->mutex);
 	sn = sp_lookup(sp, idx, idx+1);
 	if (sn) {
 		mpol_get(sn->policy);
 		pol = sn->policy;
 	}
-	spin_unlock(&sp->lock);
+	mutex_unlock(&sp->mutex);
 	return pol;
 }
 
@@ -2193,10 +2193,10 @@ static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
 				 unsigned long end, struct sp_node *new)
 {
-	struct sp_node *n, *new2 = NULL;
+	struct sp_node *n;
+	int ret = 0;
 
-restart:
-	spin_lock(&sp->lock);
+	mutex_lock(&sp->mutex);
 	n = sp_lookup(sp, start, end);
 	/* Take care of old policies in the same range. */
 	while (n && n->start < end) {
@@ -2209,16 +2209,14 @@ restart:
 		} else {
 			/* Old policy spanning whole new range. */
 			if (n->end > end) {
+				struct sp_node *new2;
+				new2 = sp_alloc(end, n->end, n->policy);
 				if (!new2) {
-					spin_unlock(&sp->lock);
-					new2 = sp_alloc(end, n->end, n->policy);
-					if (!new2)
-						return -ENOMEM;
-					goto restart;
+					ret = -ENOMEM;
+					goto out;
 				}
 				n->end = start;
 				sp_insert(sp, new2);
-				new2 = NULL;
 				break;
 			} else
 				n->end = start;
@@ -2229,12 +2227,9 @@ restart:
 	}
 	if (new)
 		sp_insert(sp, new);
-	spin_unlock(&sp->lock);
-	if (new2) {
-		mpol_put(new2->policy);
-		kmem_cache_free(sn_cache, new2);
-	}
-	return 0;
+out:
+	mutex_unlock(&sp->mutex);
+	return ret;
 }
 
 /**
@@ -2252,7 +2247,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
 	int ret;
 
 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
-	spin_lock_init(&sp->lock);
+	mutex_init(&sp->mutex);
 
 	if (mpol) {
 		struct vm_area_struct pvma;
@@ -2318,7 +2313,7 @@ void mpol_free_shared_policy(struct shared_policy *p)
 
 	if (!p->root.rb_node)
 		return;
-	spin_lock(&p->lock);
+	mutex_lock(&p->mutex);
 	next = rb_first(&p->root);
 	while (next) {
 		n = rb_entry(next, struct sp_node, nd);
@@ -2327,7 +2322,7 @@ void mpol_free_shared_policy(struct shared_policy *p)
 		mpol_put(n->policy);
 		kmem_cache_free(sn_cache, n);
 	}
-	spin_unlock(&p->lock);
+	mutex_unlock(&p->mutex);
 }
 
 /* assumes fs == KERNEL_DS */
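
The mutex_lock()/mutex_unlock() calls above only compile if struct shared_policy itself gains a mutex; that change lives in include/linux/mempolicy.h and is not quoted in these hunks. A minimal sketch of the definition this patch implies, with the field names taken from the call sites above (the exact header layout is assumed, not shown here):

struct shared_policy {
	struct rb_root root;	/* rb-tree of sp_node ranges */
	struct mutex mutex;	/* was: spinlock_t lock */
};

Switching to a sleepable mutex is what lets shared_policy_replace() call sp_alloc() while still holding the lock, so the old unlock/allocate/goto restart sequence, and the deferred cleanup of an unused new2 node after the unlock, can both go away.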