/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"
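
/*
 * Per-attach bookkeeping: every shmat() creates its own struct file
 * whose private_data points at one of these, layered over the
 * shmem/hugetlbfs file that actually backs the segment.
 */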
struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif
void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
 * Only shm_ids.rw_mutex remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

void __init shm_init(void)
{
	shm_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/shm",
				" key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime\n",
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rw_mutex
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}
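
/* Called with shm_ids.rw_mutex (writer) held. */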
static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
						shp->mlock_user);
	fput(shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rw_mutex);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);
}
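
/*
 * Faults on an attached segment are forwarded to the backing file's
 * fault handler (shmem or hugetlbfs), which shm_mmap() saved in
 * sfd->vm_ops.
 */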
static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;

	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif
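
/*
 * mmap the backing file, remember its vm_ops, then interpose
 * shm_vm_ops so attach/detach accounting (shm_open/shm_close) sees
 * every mapping of the segment.
 */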
static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	BUG_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);
	return ret;
}
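
/* Called on the final fput() of the shmat() file: drop the per-attach state. */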
static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}
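
/* fsync and get_unmapped_area simply delegate to the backing file. */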
static int shm_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	int (*fsync) (struct file *, struct dentry *, int datasync);
	struct shm_file_data *sfd = shm_file_data(file);
	int ret = -EINVAL;

	fsync = sfd->file->f_op->fsync;
	if (fsync)
		ret = fsync(sfd->file, sfd->file->f_path.dentry, datasync);
	return ret;
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						pgoff, flags);
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
#ifndef CONFIG_MMU
	.get_unmapped_area	= shm_get_unmapped_area,
#endif
};
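
/*
 * The hugetlb variant always provides get_unmapped_area, since
 * hugetlbfs mappings must be placed at hugepage-aligned addresses.
 */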
static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
};

int is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rw_mutex held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;
	int acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, size, acctflag,
					&shp->mlock_user, HUGETLB_SHMFS_INODE);
	} else {
		/*
		 * Do not honour SHM_NORESERVE (i.e. no accounting) when
		 * the system runs with OVERCOMMIT_NEVER, even if it is
		 * asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (id < 0) {
		error = id;
		goto no_id;
	}

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file->f_dentry->d_inode->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;
	shm_unlock(shp);
	return error;

no_id:
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops shm_ops;
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_ops.getnew = newseg;
	shm_ops.associate = shm_security;
	shm_ops.more_checks = shm_more_checks;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}
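
/*
 * The copy_* helpers below translate between the kernel's shmid64_ds
 * view and whichever userspace ABI (IPC_64 or the legacy IPC_OLD
 * layout) the caller selected.
 */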
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct shmid_ds out;

		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	{
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}
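
/*
 * Sum the resident (rss) and swapped-out page counts over all segments
 * in the namespace, for SHM_INFO.
 */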
/*
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;
		struct inode *inode;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		inode = shp->shm_file->f_path.dentry->d_inode;

		if (is_file_hugepages(shp->shm_file)) {
			struct address_space *mapping = inode->i_mapping;
			struct hstate *h = hstate_file(shp->shm_file);
			*rss += pages_per_huge_page(h) * mapping->nrpages;
		} else {
#ifdef CONFIG_SHMEM
			struct shmem_inode_info *info = SHMEM_I(inode);
			spin_lock(&info->lock);
			*rss += inode->i_mapping->nrpages;
			*swp += info->swapped;
			spin_unlock(&info->lock);
#else
			*rss += inode->i_mapping->nrpages;
#endif
		}

		total++;
	}
}

/*
 * This function handles some shmctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct shmid64_ds shmid64;
	struct shmid_kernel *shp;
	int err;

	if (cmd == IPC_SET) {
		if (copy_shmid_from_user(&shmid64, buf, version))
			return -EFAULT;
	}

	ipcp = ipcctl_pre_down(&shm_ids(ns), shmid, cmd, &shmid64.shm_perm, 0);
	if (IS_ERR(ipcp))
		return PTR_ERR(ipcp);

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock;
	switch (cmd) {
	case IPC_RMID:
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_update_perm(&shmid64.shm_perm, ipcp);
		shp->shm_ctim = get_seconds();
		break;
	default:
		err = -EINVAL;
	}
out_unlock:
	shm_unlock(shp);
out_up:
	up_write(&shm_ids(ns).rw_mutex);
	return err;
}
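
/*
 * For IPC_INFO and SHM_INFO, a successful return value is the highest
 * in-use index (from ipc_get_maxid()), which tools like ipcs use as
 * the upper bound when iterating with SHM_STAT.
 */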
SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rw_mutex);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);

		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rw_mutex);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		if (cmd == SHM_STAT) {
			shp = shm_lock(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_lock_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = 0;
		}
		err = -EACCES;
		if (ipcperms(&shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;
		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		shm_unlock(shp);
		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		struct file *uninitialized_var(shm_file);

		lru_add_drain_all();  /* drain pagevecs to lru lists */

		shp = shm_lock_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out;
		}

		audit_ipc_obj(&(shp->shm_perm));

		if (!capable(CAP_IPC_LOCK)) {
			uid_t euid = current_euid();
			err = -EPERM;
			if (euid != shp->shm_perm.uid &&
			    euid != shp->shm_perm.cuid)
				goto out_unlock;
			if (cmd == SHM_LOCK &&
			    !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current_user();
			if (!is_file_hugepages(shp->shm_file)) {
				err = shmem_lock(shp->shm_file, 1, user);
				if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
					shp->shm_perm.mode |= SHM_LOCKED;
					shp->mlock_user = user;
				}
			}
		} else if (!is_file_hugepages(shp->shm_file)) {
			shmem_lock(shp->shm_file, 0, shp->mlock_user);
			shp->shm_perm.mode &= ~SHM_LOCKED;
			shp->mlock_user = NULL;
		}
		shm_unlock(shp);
		goto out;
	}
	case IPC_RMID:
	case IPC_SET:
		err = shmctl_down(ns, shmid, cmd, buf, version);
		return err;
	default:
		return -EINVAL;
	}

out_unlock:
	shm_unlock(shp);
out:
	return err;
}

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file *file;
	int err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	unsigned long user_addr;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA-1)) {
			if (shmflg & SHM_RND)
				addr &= ~(SHMLBA-1);	/* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	shp = shm_lock_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out;
	}

	err = -EACCES;
	if (ipcperms(&shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	path = shp->shm_file->f_path;
	path_get(&path);
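	/*
	 * Bump shm_nattch under the ipc lock so that a concurrent IPC_RMID
	 * cannot destroy the segment while the mapping is being set up.
	 */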
	shp->shm_nattch++;
	size = i_size_read(path.dentry->d_inode);
	shm_unlock(shp);

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd)
		goto out_put_dentry;

	file = alloc_file(&path, f_mode,
			  is_file_hugepages(shp->shm_file) ?
				&shm_file_operations_huge :
				&shm_file_operations);
	if (!file)
		goto out_free;

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = do_mmap(file, addr, size, prot, flags, 0);
	*raddr = user_addr;
	err = 0;
	if (IS_ERR_VALUE(user_addr))
		err = (long)user_addr;
invalid:
	up_write(&current->mm->mmap_sem);

	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rw_mutex);
	shp = shm_lock(ns, shmid);
	BUG_ON(IS_ERR(shp));
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);

out:
	return err;

out_unlock:
	shm_unlock(shp);
	goto out;

out_free:
	kfree(sfd);
out_put_dentry:
	path_put(&path);
	goto out_nattch;
}
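
/*
 * The attach address from do_shmat() is the syscall's return value;
 * force_successful_syscall_return() keeps high addresses from being
 * misread as errno values on architectures that need the hint.
 */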
SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct vm_area_struct *next;
#endif

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			size = vma->vm_file->f_path.dentry->d_inode->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

#else /* CONFIG_MMU */
	/* under NOMMU conditions, the exact address to be destroyed must be
	 * given */
	retval = -EINVAL;
	if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		retval = 0;
	}

#endif

	up_write(&mm->mmap_sem);
	return retval;
}

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct shmid_kernel *shp = it;

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	return seq_printf(s,
			  "%10d %10d %4o " SIZE_SPEC " %5u %5u "
			  "%5lu %5u %5u %5u %5u %10lu %10lu %10lu\n",
			  shp->shm_perm.key,
			  shp->shm_perm.id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  shp->shm_nattch,
			  shp->shm_perm.uid,
			  shp->shm_perm.gid,
			  shp->shm_perm.cuid,
			  shp->shm_perm.cgid,
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim);
}
#endif