/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>

#include <asm/uaccess.h>

#include "util.h"
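
/*
 * One shm_file_data is allocated per attach by do_shmat(); it lets the
 * wrapper file find the segment id, the ipc namespace and the backing
 * shmem/hugetlbfs file again from the file and vm callbacks below.
 */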
struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static struct vm_operations_struct shm_vm_ops;

static struct ipc_ids init_shm_ids;

#define shm_ids(ns)	(*((ns)->ids[IPC_SHM_IDS]))

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)
#define shm_buildid(ns, id, seq)	\
	ipc_buildid(&shm_ids(ns), id, seq)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif
static void __shm_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids)
{
	ns->ids[IPC_SHM_IDS] = ids;
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_tot = 0;
	ipc_init_ids(ids);
}

static void do_shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

int shm_init_ns(struct ipc_namespace *ns)
{
	struct ipc_ids *ids;

	ids = kmalloc(sizeof(struct ipc_ids), GFP_KERNEL);
	if (ids == NULL)
		return -ENOMEM;

	__shm_init_ns(ns, ids);
	return 0;
}

void shm_exit_ns(struct ipc_namespace *ns)
{
	struct shmid_kernel *shp;
	int next_id;
	int total, in_use;

	mutex_lock(&shm_ids(ns).mutex);

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		shp = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (shp == NULL)
			continue;
		ipc_lock_by_ptr(&shp->shm_perm);
		do_shm_rmid(ns, shp);
		total++;
	}
	mutex_unlock(&shm_ids(ns).mutex);

	kfree(ns->ids[IPC_SHM_IDS]);
	ns->ids[IPC_SHM_IDS] = NULL;
}

void __init shm_init(void)
{
	__shm_init_ns(&init_ipc_ns, &init_shm_ids);
	ipc_init_proc_interface("sysvipc/shm",
				" key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime\n",
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}
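
/*
 * Thin wrappers that route the generic ipc_* helpers at the shm ipc_ids of
 * the given namespace.  shm_lock()/shm_lock_check() return the segment with
 * its lock held (callers check the result with IS_ERR()), shm_rmid() drops
 * the id, and shm_addid() inserts a new segment subject to shm_ctlmni.
 */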
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}

static inline int shm_addid(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.mutex locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else
		user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
						shp->mlock_user);
	fput(shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	mutex_lock(&shm_ids(ns).mutex);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	mutex_unlock(&shm_ids(ns).mutex);
}
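
/*
 * Page faults on an attached segment are forwarded to the fault handler of
 * the underlying shmem (or hugetlbfs) file that shm_mmap() saved in
 * shm_file_data.
 */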
static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;

	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;
	else
		pol = current->mempolicy;

	return pol;
}
#endif
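
/*
 * mmap() handler of the wrapper file installed by do_shmat(): let the
 * backing shmem/hugetlbfs file build the mapping first, remember the
 * vm_ops it installed, then interpose shm_vm_ops and record the new
 * attach via shm_open().
 */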
static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	BUG_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);

	return ret;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	int (*fsync) (struct file *, struct dentry *, int datasync);
	struct shm_file_data *sfd = shm_file_data(file);
	int ret = -EINVAL;

	fsync = sfd->file->f_op->fsync;
	if (fsync)
		ret = fsync(sfd->file, sfd->file->f_path.dentry, datasync);
	return ret;
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);

	return get_unmapped_area(sfd->file, addr, len, pgoff, flags);
}

int is_file_shm_hugepages(struct file *file)
{
	int ret = 0;

	if (file->f_op == &shm_file_operations) {
		struct shm_file_data *sfd;
		sfd = shm_file_data(file);
		ret = is_file_hugepages(sfd->file);
	}
	return ret;
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area = shm_get_unmapped_area,
};

static struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};
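
/*
 * newseg - create a new shared memory segment
 *
 * Allocates the shmid_kernel, charges the pages against shm_ctlall, and
 * backs the segment with either a hugetlbfs file (SHM_HUGETLB) or an
 * anonymous shmem file.  Returns the new segment's id on success or a
 * negative errno.
 */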
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		/* hugetlb_file_setup takes care of mlock user accounting */
		file = hugetlb_file_setup(name, size);
		shp->mlock_user = current->user;
	} else {
		int acctflag = VM_ACCOUNT;
		/*
		 * Do not honour SHM_NORESERVE (i.e. do not skip accounting)
		 * when the overcommit policy is OVERCOMMIT_NEVER, even if
		 * it was asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = 0;
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	error = -ENOSPC;
	id = shm_addid(ns, shp);
	if (id == -1)
		goto no_id;

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_perm.id = shm_buildid(ns, id, shp->shm_perm.seq);
	shp->shm_file = file;
	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file->f_dentry->d_inode->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;
	shm_unlock(shp);
	return error;

no_id:
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}

static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}
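
/*
 * The shmget(2) entry point: package key, size and flags into ipc_params
 * and let the common ipcget() helper either create a new segment through
 * newseg() or look up and validate an existing one.
 */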
asmlinkage long sys_shmget(key_t key, size_t size, int shmflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops shm_ops;
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_ops.getnew = newseg;
	shm_ops.associate = shm_security;
	shm_ops.more_checks = shm_more_checks;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}
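
/*
 * Copy a shmid64_ds out to user space, converting to the legacy shmid_ds
 * layout when the caller used the old (IPC_OLD) ABI.
 */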
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
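
/*
 * shm_setbuf holds the only fields IPC_SET is allowed to change;
 * copy_shmid_from_user() extracts them from either the new (IPC_64) or
 * the old user-space layout.
 */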
struct shm_setbuf {
	uid_t	uid;
	gid_t	gid;
	mode_t	mode;
};

static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
	    {
		struct shmid64_ds tbuf;

		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->uid	= tbuf.shm_perm.uid;
		out->gid	= tbuf.shm_perm.gid;
		out->mode	= tbuf.shm_perm.mode;

		return 0;
	    }
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid	= tbuf_old.shm_perm.uid;
		out->gid	= tbuf_old.shm_perm.gid;
		out->mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
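
/*
 * Walk every segment in the namespace and add up resident and swapped-out
 * pages for SHM_INFO.  The caller must hold shm_ids(ns).mutex, which also
 * keeps the idr and the ipc_addid() writes it depends on stable.
 */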
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
			unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct shmid_kernel *shp;
		struct inode *inode;

		/*
		 * idr_find() is called via shm_get(), so with shm_ids.mutex
		 * locked. Since ipc_addid() is also called with
		 * shm_ids.mutex down, there is no need to add read barriers
		 * here to guarantee the writes in ipc_addid() are seen in
		 * order here (for Alpha).
		 * However idr_find() itself does not necessarily require
		 * ipc_ids.mutex down. So if idr_find() is used by other
		 * places without ipc_ids.mutex down, then it needs read
		 * memory barriers as ipc_lock() does.
		 */
		shp = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (shp == NULL)
			continue;

		inode = shp->shm_file->f_path.dentry->d_inode;

		if (is_file_hugepages(shp->shm_file)) {
			struct address_space *mapping = inode->i_mapping;
			*rss += (HPAGE_SIZE/PAGE_SIZE)*mapping->nrpages;
		} else {
			struct shmem_inode_info *info = SHMEM_I(inode);
			spin_lock(&info->lock);
			*rss += inode->i_mapping->nrpages;
			*swp += info->swapped;
			spin_unlock(&info->lock);
		}

		total++;
	}
}
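
/*
 * The shmctl(2) entry point.  IPC_INFO/SHM_INFO report limits and usage,
 * SHM_STAT/IPC_STAT copy one segment's state to user space, SHM_LOCK and
 * SHM_UNLOCK prevent or allow swapping of the backing pages, IPC_RMID
 * marks the segment for destruction, and IPC_SET updates owner, group
 * and mode.
 */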
asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
{
	struct shm_setbuf setbuf;
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;
		/* reading an integer is always atomic */
		err = ipc_get_maxid(&shm_ids(ns));
		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info, 0, sizeof(shm_info));
		mutex_lock(&shm_ids(ns).mutex);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		mutex_unlock(&shm_ids(ns).mutex);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		if (!buf) {
			err = -EFAULT;
			goto out;
		}

		if (cmd == SHM_STAT) {
			shp = shm_lock(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_lock_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = 0;
		}
		err = -EACCES;
		if (ipcperms(&shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;
		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		shm_unlock(shp);
		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		shp = shm_lock_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out;
		}

		err = audit_ipc_obj(&(shp->shm_perm));
		if (err)
			goto out_unlock;

		if (!capable(CAP_IPC_LOCK)) {
			err = -EPERM;
			if (current->euid != shp->shm_perm.uid &&
			    current->euid != shp->shm_perm.cuid)
				goto out_unlock;
			if (cmd == SHM_LOCK &&
			    !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current->user;
			if (!is_file_hugepages(shp->shm_file)) {
				err = shmem_lock(shp->shm_file, 1, user);
				if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
					shp->shm_perm.mode |= SHM_LOCKED;
					shp->mlock_user = user;
				}
			}
		} else if (!is_file_hugepages(shp->shm_file)) {
			shmem_lock(shp->shm_file, 0, shp->mlock_user);
			shp->shm_perm.mode &= ~SHM_LOCKED;
			shp->mlock_user = NULL;
		}
		shm_unlock(shp);
		goto out;
	}
	case IPC_RMID:
	{
		/*
		 * We cannot simply remove the file. The SVID states
		 * that the block remains until the last person
		 * detaches from it, then is deleted. A shmat() on
		 * an RMID segment is legal in older Linux and if
		 * we change it apps break...
		 *
		 * Instead we set a destroyed flag, and then blow
		 * the name away when the usage hits zero.
		 */
		mutex_lock(&shm_ids(ns).mutex);
		shp = shm_lock_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_up;
		}

		err = audit_ipc_obj(&(shp->shm_perm));
		if (err)
			goto out_unlock_up;

		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			err = -EPERM;
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		do_shm_rmid(ns, shp);
		mutex_unlock(&shm_ids(ns).mutex);
		goto out;
	}
	case IPC_SET:
	{
		if (!buf) {
			err = -EFAULT;
			goto out;
		}

		if (copy_shmid_from_user(&setbuf, buf, version)) {
			err = -EFAULT;
			goto out;
		}
		mutex_lock(&shm_ids(ns).mutex);
		shp = shm_lock_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_up;
		}
		err = audit_ipc_obj(&(shp->shm_perm));
		if (err)
			goto out_unlock_up;
		err = audit_ipc_set_perm(0, setbuf.uid, setbuf.gid, setbuf.mode);
		if (err)
			goto out_unlock_up;
		err = -EPERM;
		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		shp->shm_perm.uid = setbuf.uid;
		shp->shm_perm.gid = setbuf.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~S_IRWXUGO)
			| (setbuf.mode & S_IRWXUGO);
		shp->shm_ctim = get_seconds();
		break;
	}

	default:
		err = -EINVAL;
		goto out;
	}

	err = 0;
out_unlock_up:
	shm_unlock(shp);
out_up:
	mutex_unlock(&shm_ids(ns).mutex);
	goto out;
out_unlock:
	shm_unlock(shp);
out:
	return err;
}
/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file *file;
	int err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	unsigned long user_addr;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	mode_t f_mode;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA-1)) {
			if (shmflg & SHM_RND)
				addr &= ~(SHMLBA-1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	shp = shm_lock_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out;
	}

	err = -EACCES;
	if (ipcperms(&shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	path.dentry = dget(shp->shm_file->f_path.dentry);
	path.mnt    = shp->shm_file->f_path.mnt;
	shp->shm_nattch++;
	size = i_size_read(path.dentry->d_inode);
	shm_unlock(shp);

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd)
		goto out_put_dentry;

	err = -ENOMEM;

	file = alloc_file(path.mnt, path.dentry, f_mode, &shm_file_operations);
	if (!file)
		goto out_free;

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = do_mmap(file, addr, size, prot, flags, 0);
	*raddr = user_addr;
	err = 0;
	if (IS_ERR_VALUE(user_addr))
		err = (long)user_addr;
invalid:
	up_write(&current->mm->mmap_sem);

	fput(file);

out_nattch:
	mutex_lock(&shm_ids(ns).mutex);
	shp = shm_lock(ns, shmid);
	BUG_ON(IS_ERR(shp));
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	mutex_unlock(&shm_ids(ns).mutex);

out:
	return err;

out_unlock:
	shm_unlock(shp);
	goto out;

out_free:
	kfree(sfd);
out_put_dentry:
	dput(path.dentry);
	goto out_nattch;
}
asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}
/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
asmlinkage long sys_shmdt(char __user *shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *next;
	unsigned long addr = (unsigned long)shmaddr;
	loff_t size = 0;
	int retval = -EINVAL;

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			size = vma->vm_file->f_path.dentry->d_inode->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

	up_write(&mm->mmap_sem);
	return retval;
}

#ifdef CONFIG_PROC_FS
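/*
 * Format one segment as a line of /proc/sysvipc/shm; the header printed
 * from shm_init() names the columns.  The wider format string is chosen
 * when size_t is wider than int so the size column does not overflow its
 * field width.
 */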
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct shmid_kernel *shp = it;
	char *format;

#define SMALL_STRING "%10d %10d %4o %10u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
#define BIG_STRING   "%10d %10d %4o %21u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"

	if (sizeof(size_t) <= sizeof(int))
		format = SMALL_STRING;
	else
		format = BIG_STRING;
	return seq_printf(s, format,
			  shp->shm_perm.key,
			  shp->shm_perm.id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  shp->shm_nattch,
			  shp->shm_perm.uid,
			  shp->shm_perm.gid,
			  shp->shm_perm.cuid,
			  shp->shm_perm.cgid,
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim);
}
#endif