@@ -22,9 +22,9 @@ int rwsem_is_locked(struct rw_semaphore *sem)
 	int ret = 1;
 	unsigned long flags;
 
-	if (spin_trylock_irqsave(&sem->wait_lock, flags)) {
+	if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
 		ret = (sem->activity != 0);
-		spin_unlock_irqrestore(&sem->wait_lock, flags);
+		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 	}
 	return ret;
 }
@@ -44,7 +44,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
 	lockdep_init_map(&sem->dep_map, name, key, 0);
 #endif
 	sem->activity = 0;
-	spin_lock_init(&sem->wait_lock);
+	raw_spin_lock_init(&sem->wait_lock);
 	INIT_LIST_HEAD(&sem->wait_list);
 }
 EXPORT_SYMBOL(__init_rwsem);
@@ -145,12 +145,12 @@ void __sched __down_read(struct rw_semaphore *sem)
 	struct task_struct *tsk;
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity++;
-		spin_unlock_irqrestore(&sem->wait_lock, flags);
+		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 		goto out;
 	}
 
@@ -165,7 +165,7 @@ void __sched __down_read(struct rw_semaphore *sem)
 	list_add_tail(&waiter.list, &sem->wait_list);
 
 	/* we don't need to touch the semaphore struct anymore */
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	/* wait to be given the lock */
 	for (;;) {
@@ -189,7 +189,7 @@ int __down_read_trylock(struct rw_semaphore *sem)
 	int ret = 0;
 
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
 		/* granted */
@@ -197,7 +197,7 @@ int __down_read_trylock(struct rw_semaphore *sem)
 		ret = 1;
 	}
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	return ret;
 }
@@ -212,12 +212,12 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 	struct task_struct *tsk;
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity = -1;
-		spin_unlock_irqrestore(&sem->wait_lock, flags);
+		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 		goto out;
 	}
 
@@ -232,7 +232,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 	list_add_tail(&waiter.list, &sem->wait_list);
 
 	/* we don't need to touch the semaphore struct anymore */
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	/* wait to be given the lock */
 	for (;;) {
@@ -260,7 +260,7 @@ int __down_write_trylock(struct rw_semaphore *sem)
 	unsigned long flags;
 	int ret = 0;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
 		/* granted */
@@ -268,7 +268,7 @@ int __down_write_trylock(struct rw_semaphore *sem)
 		ret = 1;
 	}
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	return ret;
 }
@@ -280,12 +280,12 @@ void __up_read(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
 		sem = __rwsem_wake_one_writer(sem);
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
 
 /*
@@ -295,13 +295,13 @@ void __up_write(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	sem->activity = 0;
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, 1);
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
 
 /*
@@ -312,12 +312,12 @@ void __downgrade_write(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	sem->activity = 1;
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, 0);
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
 
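For reference, the sketch below is not part of the patch; it is a minimal illustration of the raw_spinlock_t calling pattern the hunks above switch to, assuming a kernel build environment. The struct and function names (demo_counter, demo_counter_peek) are invented for illustration only.

#include <linux/spinlock.h>

/* Illustrative only -- the names here are made up, not from the patch. */
struct demo_counter {
	raw_spinlock_t lock;	/* inner lock, set up with raw_spin_lock_init() */
	int value;
};

static void demo_counter_init(struct demo_counter *c)
{
	raw_spin_lock_init(&c->lock);
	c->value = 0;
}

static void demo_counter_inc(struct demo_counter *c)
{
	unsigned long flags;

	/* same lock/unlock pair used on sem->wait_lock above */
	raw_spin_lock_irqsave(&c->lock, flags);
	c->value++;
	raw_spin_unlock_irqrestore(&c->lock, flags);
}

static int demo_counter_peek(struct demo_counter *c, int *out)
{
	unsigned long flags;
	int ret = 0;

	/* trylock variant, as in rwsem_is_locked() above */
	if (raw_spin_trylock_irqsave(&c->lock, flags)) {
		*out = c->value;
		raw_spin_unlock_irqrestore(&c->lock, flags);
		ret = 1;
	}
	return ret;
}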