@@ -74,6 +74,7 @@ void shm_init_ns(struct ipc_namespace *ns)
 	ns->shm_ctlmax = SHMMAX;
 	ns->shm_ctlall = SHMALL;
 	ns->shm_ctlmni = SHMMNI;
+	ns->shm_rmid_forced = 0;
 	ns->shm_tot = 0;
 	ipc_init_ids(&shm_ids(ns));
 }
@@ -186,6 +187,23 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
 	ipc_rcu_putref(shp);
 }
 
+/*
+ * shm_may_destroy - identifies whether shm segment should be destroyed now
+ *
+ * Returns true if and only if there are no active users of the segment and
+ * one of the following is true:
+ *
+ * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
+ *
+ * 2) sysctl kernel.shm_rmid_forced is set to 1.
+ */
+static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
+{
+	return (shp->shm_nattch == 0) &&
+	       (ns->shm_rmid_forced ||
+		(shp->shm_perm.mode & SHM_DEST));
+}
+
 /*
  * remove the attach descriptor vma.
  * free memory for segment if it is marked destroyed.
@@ -206,11 +224,83 @@ static void shm_close(struct vm_area_struct *vma)
 	shp->shm_lprid = task_tgid_vnr(current);
 	shp->shm_dtim = get_seconds();
 	shp->shm_nattch--;
-	if(shp->shm_nattch == 0 &&
-	   shp->shm_perm.mode & SHM_DEST)
+	if (shm_may_destroy(ns, shp))
+		shm_destroy(ns, shp);
+	else
+		shm_unlock(shp);
+	up_write(&shm_ids(ns).rw_mutex);
+}
+
+static int shm_try_destroy_current(int id, void *p, void *data)
+{
+	struct ipc_namespace *ns = data;
+	struct shmid_kernel *shp = shm_lock(ns, id);
+
+	if (IS_ERR(shp))
+		return 0;
+
+	if (shp->shm_cprid != task_tgid_vnr(current)) {
+		shm_unlock(shp);
+		return 0;
+	}
+
+	if (shm_may_destroy(ns, shp))
+		shm_destroy(ns, shp);
+	else
+		shm_unlock(shp);
+	return 0;
+}
+
+static int shm_try_destroy_orphaned(int id, void *p, void *data)
+{
+	struct ipc_namespace *ns = data;
+	struct shmid_kernel *shp = shm_lock(ns, id);
+	struct task_struct *task;
+
+	if (IS_ERR(shp))
+		return 0;
+
+	/*
+	 * We want to destroy segments without users and with already
+	 * exit'ed originating process.
+	 *
+	 * XXX: the originating process may exist in another pid namespace.
+	 */
+	task = find_task_by_vpid(shp->shm_cprid);
+	if (task != NULL) {
+		shm_unlock(shp);
+		return 0;
+	}
+
+	if (shm_may_destroy(ns, shp))
 		shm_destroy(ns, shp);
 	else
 		shm_unlock(shp);
+	return 0;
+}
+
+void shm_destroy_orphaned(struct ipc_namespace *ns)
+{
+	down_write(&shm_ids(ns).rw_mutex);
+	idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
+	up_write(&shm_ids(ns).rw_mutex);
+}
+
+
+void exit_shm(struct task_struct *task)
+{
+	struct nsproxy *nsp = task->nsproxy;
+	struct ipc_namespace *ns;
+
+	if (!nsp)
+		return;
+	ns = nsp->ipc_ns;
+	if (!ns || !ns->shm_rmid_forced)
+		return;
+
+	/* Destroy all already created segments, but not mapped yet */
+	down_write(&shm_ids(ns).rw_mutex);
+	idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
 	up_write(&shm_ids(ns).rw_mutex);
 }
 
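The hunks above initialize ns->shm_rmid_forced and consult it from shm_may_destroy(), shm_close() and exit_shm(), but the knob itself is exposed as the sysctl kernel.shm_rmid_forced, whose registration is not part of this excerpt. As a rough sketch only, assuming the usual ctl_table / proc_dointvec_minmax plumbing in ipc/ipc_sysctl.c (table name, handler and field values here are assumptions, not taken from this diff), the corresponding entry could look something like:

/*
 * Hypothetical sketch, not part of this diff: expose shm_rmid_forced as
 * /proc/sys/kernel/shm_rmid_forced, clamped to the range 0..1.
 */
#include <linux/sysctl.h>
#include <linux/ipc_namespace.h>

static int zero;
static int one = 1;

static struct ctl_table shm_rmid_forced_sketch[] = {
	{
		.procname	= "shm_rmid_forced",
		.data		= &init_ipc_ns.shm_rmid_forced,
		.maxlen		= sizeof(init_ipc_ns.shm_rmid_forced),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &one,
	},
	{}
};

A real handler would presumably also call shm_destroy_orphaned() when the value flips from 0 to 1, which would explain why that helper is added here without any caller in these hunks.
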
@@ -950,8 +1040,7 @@ out_nattch:
 	shp = shm_lock(ns, shmid);
 	BUG_ON(IS_ERR(shp));
 	shp->shm_nattch--;
-	if(shp->shm_nattch == 0 &&
-	   shp->shm_perm.mode & SHM_DEST)
+	if (shm_may_destroy(ns, shp))
 		shm_destroy(ns, shp);
 	else
 		shm_unlock(shp);
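
From userspace, the effect of the new behaviour can be observed with a small System V shm program. The demo below is illustrative only and is not part of the patch; it assumes a kernel carrying these changes, with the sysctl toggled via something like "sysctl -w kernel.shm_rmid_forced=1".

/*
 * Illustrative userspace demo, not part of the patch.  Build with
 * "cc -o shm_demo shm_demo.c" and compare `ipcs -m` output with
 * kernel.shm_rmid_forced set to 0 and then to 1.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	/* Create a 1 MiB segment and deliberately skip shmctl(IPC_RMID). */
	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);
	if (id < 0) {
		perror("shmget");
		return EXIT_FAILURE;
	}

	/* Attach once and detach again, dropping shm_nattch back to 0. */
	void *addr = shmat(id, NULL, 0);
	if (addr == (void *)-1) {
		perror("shmat");
		return EXIT_FAILURE;
	}
	if (shmdt(addr) < 0)
		perror("shmdt");

	printf("segment %d left behind without IPC_RMID\n", id);
	/*
	 * With kernel.shm_rmid_forced = 0 the segment stays visible in
	 * `ipcs -m` after this program exits.  With it set to 1, the
	 * shm_close()/exit_shm() paths in the patch reclaim it as soon as
	 * the last attachment is gone, even though SHM_DEST was never set.
	 */
	return EXIT_SUCCESS;
}

Forced destruction can break applications that intentionally park data in an unattached segment, which is presumably why the behaviour is opt-in and defaults to off (ns->shm_rmid_forced = 0 in shm_init_ns()).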