rwsem generic spinlock: use IRQ save/restore spinlocks

rwsems can be used with IRQs disabled, particularly in early boot
before IRQs are enabled.  Currently the spin_unlock_irq() usage in the
slowpath will unconditionally enable interrupts and cause problems
since interrupts are not yet initialized or enabled.

This patch uses the save/restore versions of the IRQ spinlock calls in
the slowpath to ensure interrupts are not unintentionally enabled.

Signed-off-by: Kevin Hilman <khilman@deeprootsystems.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
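
A minimal sketch (not part of the patch) contrasting the two spinlock APIs,
assuming the generic rw_semaphore from lib/rwsem-spinlock.c with its
spinlock_t wait_lock; the function name wait_lock_irq_safe is hypothetical:

	#include <linux/rwsem.h>
	#include <linux/spinlock.h>

	/* Sketch only: spin_lock_irq()/spin_unlock_irq() force the IRQ state
	 * (disable on lock, enable on unlock), so the unlock re-enables
	 * interrupts even if the caller entered with them off, e.g. in early
	 * boot.  The save/restore variants put back the caller's state. */
	static void wait_lock_irq_safe(struct rw_semaphore *sem)
	{
		unsigned long flags;

		spin_lock_irqsave(&sem->wait_lock, flags);	/* save state, disable IRQs */
		/* ... touch sem->activity / sem->wait_list ... */
		spin_unlock_irqrestore(&sem->wait_lock, flags);	/* restore saved IRQ state */
	}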
Kevin Hilman, 15 years ago
commit 3eac4abaa6

1 changed file with 8 additions and 6 deletions:
    lib/rwsem-spinlock.c

--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c

@@ -143,13 +143,14 @@ void __sched __down_read(struct rw_semaphore *sem)
 {
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
+	unsigned long flags;
 
-	spin_lock_irq(&sem->wait_lock);
+	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity++;
-		spin_unlock_irq(&sem->wait_lock);
+		spin_unlock_irqrestore(&sem->wait_lock, flags);
 		goto out;
 	}
 
@@ -164,7 +165,7 @@ void __sched __down_read(struct rw_semaphore *sem)
 	list_add_tail(&waiter.list, &sem->wait_list);
 
 	/* we don't need to touch the semaphore struct anymore */
-	spin_unlock_irq(&sem->wait_lock);
+	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	/* wait to be given the lock */
 	for (;;) {
@@ -209,13 +210,14 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
+	unsigned long flags;
 
-	spin_lock_irq(&sem->wait_lock);
+	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity = -1;
-		spin_unlock_irq(&sem->wait_lock);
+		spin_unlock_irqrestore(&sem->wait_lock, flags);
 		goto out;
 	}
 
@@ -230,7 +232,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 	list_add_tail(&waiter.list, &sem->wait_list);
 
 	/* we don't need to touch the semaphore struct anymore */
-	spin_unlock_irq(&sem->wait_lock);
+	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	/* wait to be given the lock */
 	for (;;) {