Reduce cpuset.c write_lock_irq() to read_lock()

cpuset.c:update_nodemask() takes a write_lock_irq() on tasklist_lock to
block concurrent forks; a read_lock() suffices, since fork() links the new
task into the tasklist under the write side of the lock, and is less
intrusive: it neither disables interrupts nor excludes other readers.
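
A minimal userspace sketch of the reader/writer reasoning, using pthreads
purely as an analogy (kernel rwlocks spin and write_lock_irq() also
disables interrupts, which this does not model; fork_side/scan_side are
illustrative stand-ins, not kernel functions):

	#include <pthread.h>

	static pthread_rwlock_t tasklist_lock = PTHREAD_RWLOCK_INITIALIZER;

	/* stand-in for fork()/copy_process(): mutates the task list */
	void fork_side(void)
	{
		pthread_rwlock_wrlock(&tasklist_lock);	/* write side */
		/* ... link the new task into the list ... */
		pthread_rwlock_unlock(&tasklist_lock);
	}

	/* stand-in for update_nodemask(): only scans the list */
	void scan_side(void)
	{
		pthread_rwlock_rdlock(&tasklist_lock);	/* read side suffices */
		/* ... count/collect tasks; forks are held off meanwhile ... */
		pthread_rwlock_unlock(&tasklist_lock);
	}

Because the writer (fork) must wait for all readers to drop the lock, a
read_lock() holder still holds off concurrent forks while letting other
scans proceed in parallel.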

Signed-off-by: Paul Menage <menage@google.com>
Acked-by: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

commit c2aef333c9
Author: Paul Menage

 kernel/cpuset.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c

@@ -981,10 +981,10 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 		mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL);
 		if (!mmarray)
 			goto done;
-		write_lock_irq(&tasklist_lock);		/* block fork */
+		read_lock(&tasklist_lock);		/* block fork */
 		if (atomic_read(&cs->count) <= ntasks)
 			break;				/* got enough */
-		write_unlock_irq(&tasklist_lock);	/* try again */
+		read_unlock(&tasklist_lock);		/* try again */
 		kfree(mmarray);
 	}

@@ -1006,7 +1006,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 			continue;
 		mmarray[n++] = mm;
 	} while_each_thread(g, p);
-	write_unlock_irq(&tasklist_lock);
+	read_unlock(&tasklist_lock);

 	/*
 	 * Now that we've dropped the tasklist spinlock, we can
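
The first hunk also shows the allocate-then-validate retry pattern the
lock change preserves: kmalloc(GFP_KERNEL) may sleep, so it cannot run
under the spinlock. A userspace sketch of that pattern, with hypothetical
names (snapshot_tasks, list_lock, task_count) standing in for the kernel's:

	#include <pthread.h>
	#include <stdlib.h>

	static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;
	static int task_count;			/* stand-in for cs->count */

	void **snapshot_tasks(int *out_n)
	{
		void **arr;
		int ntasks;

		for (;;) {
			ntasks = task_count;	/* unlocked estimate, may race */
			arr = malloc(ntasks * sizeof(*arr));
			if (!arr)
				return NULL;
			pthread_rwlock_rdlock(&list_lock);
			if (task_count <= ntasks)
				break;		/* got enough room; keep lock */
			pthread_rwlock_unlock(&list_lock);
			free(arr);		/* raced with fork; try again */
		}
		/* ... fill arr from the list, still under the lock ... */
		pthread_rwlock_unlock(&list_lock);
		*out_n = ntasks;
		return arr;
	}

As in the patch, a successful size check breaks out with the lock still
held, so the array is filled from a list that cannot grow underneath it.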