|
@@ -194,14 +194,31 @@ void __init sem_init (void)
|
|
|
* sem_lock_(check_) routines are called in the paths where the rw_mutex
|
|
|
* is not held.
|
|
|
*/
|
|
|
-static inline struct sem_array *sem_lock(struct ipc_namespace *ns, int id)
|
|
|
+static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id)
|
|
|
{
|
|
|
- struct kern_ipc_perm *ipcp = ipc_lock(&sem_ids(ns), id);
|
|
|
+ struct kern_ipc_perm *ipcp;
|
|
|
+ struct sem_array *sma;
|
|
|
|
|
|
- if (IS_ERR(ipcp))
|
|
|
- return (struct sem_array *)ipcp;
|
|
|
+ rcu_read_lock();
|
|
|
+ ipcp = ipc_obtain_object(&sem_ids(ns), id);
|
|
|
+ if (IS_ERR(ipcp)) {
|
|
|
+ sma = ERR_CAST(ipcp);
|
|
|
+ goto err;
|
|
|
+ }
|
|
|
|
|
|
- return container_of(ipcp, struct sem_array, sem_perm);
|
|
|
+ spin_lock(&ipcp->lock);
|
|
|
+
|
|
|
+	/* ipc_rmid() may have already freed the ID while sem_obtain_lock()
|
|
|
+	 * was spinning: verify that the structure is still valid
|
|
|
+	 */
|
|
|
+ if (!ipcp->deleted)
|
|
|
+ return container_of(ipcp, struct sem_array, sem_perm);
|
|
|
+
|
|
|
+ spin_unlock(&ipcp->lock);
|
|
|
+ sma = ERR_PTR(-EINVAL);
|
|
|
+err:
|
|
|
+ rcu_read_unlock();
|
|
|
+ return sma;
|
|
|
}
|
|
|
|
|
|
static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
|
|
@@ -1593,7 +1610,7 @@ sleep_again:
|
|
|
goto out_free;
|
|
|
}
|
|
|
|
|
|
- sma = sem_lock(ns, semid);
|
|
|
+ sma = sem_obtain_lock(ns, semid);
|
|
|
|
|
|
/*
|
|
|
* Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
|