
/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>

#include <asm/uaccess.h>

#include "util.h"
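/*
 * Per-attach state hung off the dummy file that do_shmat() creates for the
 * mapping: the segment id, its ipc namespace, the real backing file and
 * (once mapped) the backing file's original vm_ops.
 */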
struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static struct vm_operations_struct shm_vm_ops;

static struct ipc_ids init_shm_ids;

#define shm_ids(ns)	(*((ns)->ids[IPC_SHM_IDS]))

#define shm_lock(ns, id)		\
	((struct shmid_kernel *)ipc_lock(&shm_ids(ns), id))
#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)
#define shm_get(ns, id)			\
	((struct shmid_kernel *)ipc_get(&shm_ids(ns), id))
#define shm_buildid(ns, id, seq)	\
	ipc_buildid(&shm_ids(ns), id, seq)
static int newseg(struct ipc_namespace *ns, key_t key,
		  int shmflg, size_t size);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif
static void __shm_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids)
{
	ns->ids[IPC_SHM_IDS] = ids;
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_tot = 0;
	ipc_init_ids(ids, 1);
}
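/*
 * Mark a segment for destruction.  If it still has attaches, hide the key
 * and set SHM_DEST so the last detach frees it; otherwise destroy it now.
 * Called with shp locked and shm_ids.mutex held; returns with shp unlocked.
 */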
static void do_shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}
int shm_init_ns(struct ipc_namespace *ns)
{
	struct ipc_ids *ids;

	ids = kmalloc(sizeof(struct ipc_ids), GFP_KERNEL);
	if (ids == NULL)
		return -ENOMEM;

	__shm_init_ns(ns, ids);
	return 0;
}
void shm_exit_ns(struct ipc_namespace *ns)
{
	int i;
	struct shmid_kernel *shp;

	mutex_lock(&shm_ids(ns).mutex);
	for (i = 0; i <= shm_ids(ns).max_id; i++) {
		shp = shm_lock(ns, i);
		if (shp == NULL)
			continue;

		do_shm_rmid(ns, shp);
	}
	mutex_unlock(&shm_ids(ns).mutex);

	ipc_fini_ids(ns->ids[IPC_SHM_IDS]);
	kfree(ns->ids[IPC_SHM_IDS]);
	ns->ids[IPC_SHM_IDS] = NULL;
}
void __init shm_init(void)
{
	__shm_init_ns(&init_ipc_ns, &init_shm_ids);
	ipc_init_proc_interface("sysvipc/shm",
				" key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime\n",
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}
static inline int shm_checkid(struct ipc_namespace *ns,
			      struct shmid_kernel *s, int id)
{
	if (ipc_checkid(&shm_ids(ns), &s->shm_perm, id))
		return -EIDRM;
	return 0;
}

static inline struct shmid_kernel *shm_rmid(struct ipc_namespace *ns, int id)
{
	return (struct shmid_kernel *)ipc_rmid(&shm_ids(ns), id);
}

static inline int shm_addid(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
}
/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(!shp);
	shp->shm_atim = get_seconds();
	shp->shm_lprid = current->tgid;
	shp->shm_nattch++;
	shm_unlock(shp);
}
/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.mutex locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp->id);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else
		user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
				shp->mlock_user);
	fput(shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}
/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	mutex_lock(&shm_ids(ns).mutex);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	BUG_ON(!shp);
	shp->shm_lprid = current->tgid;
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	mutex_unlock(&shm_ids(ns).mutex);
}
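/*
 * Page faults on an attached segment are forwarded to the nopage handler
 * of the backing shmem/hugetlbfs file saved in shm_file_data.
 */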
static struct page *shm_nopage(struct vm_area_struct *vma,
			       unsigned long address, int *type)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->nopage(vma, address, type);
}
#ifdef CONFIG_NUMA
int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;

	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

struct mempolicy *shm_get_policy(struct vm_area_struct *vma, unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;
	else
		pol = current->mempolicy;
	return pol;
}
#endif
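/*
 * Map the segment: call ->mmap() of the backing shmem/hugetlbfs file, then
 * interpose shm_vm_ops so that shm_open()/shm_close() keep the attach count
 * and times up to date.  The backing file's vm_ops are stashed in the
 * shm_file_data for shm_nopage() and the NUMA policy hooks.
 */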
static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);
	return ret;
}
static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}
static int shm_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	int (*fsync) (struct file *, struct dentry *, int datasync);
	struct shm_file_data *sfd = shm_file_data(file);
	int ret = -EINVAL;

	fsync = sfd->file->f_op->fsync;
	if (fsync)
		ret = fsync(sfd->file, sfd->file->f_path.dentry, datasync);
	return ret;
}
static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);

	return get_unmapped_area(sfd->file, addr, len, pgoff, flags);
}
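/* Returns non-zero if @file is a SysV shm mapping backed by hugetlbfs. */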
int is_file_shm_hugepages(struct file *file)
{
	int ret = 0;

	if (file->f_op == &shm_file_operations) {
		struct shm_file_data *sfd;
		sfd = shm_file_data(file);
		ret = is_file_hugepages(sfd->file);
	}
	return ret;
}
static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
};

static struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.nopage	= shm_nopage,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};
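/*
 * Create a new shared memory segment of @size bytes backed by a shmem or
 * hugetlbfs file, charge it against shm_ctlall and return the new shmid
 * (or a negative errno).  Called with shm_ids.mutex held.
 */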
static int newseg(struct ipc_namespace *ns, key_t key, int shmflg, size_t size)
{
	int error;
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		/* hugetlb_file_setup takes care of mlock user accounting */
		file = hugetlb_file_setup(name, size);
		shp->mlock_user = current->user;
	} else {
		int acctflag = VM_ACCOUNT;
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
		    sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = 0;
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	error = -ENOSPC;
	id = shm_addid(ns, shp);
	if (id == -1)
		goto no_id;

	shp->shm_cprid = current->tgid;
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->id = shm_buildid(ns, id, shp->shm_perm.seq);
	shp->shm_file = file;
	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file->f_dentry->d_inode->i_ino = shp->id;

	ns->shm_tot += numpages;
	shm_unlock(shp);
	return shp->id;

no_id:
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}
asmlinkage long sys_shmget(key_t key, size_t size, int shmflg)
{
	struct shmid_kernel *shp;
	int err, id = 0;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	mutex_lock(&shm_ids(ns).mutex);
	if (key == IPC_PRIVATE) {
		err = newseg(ns, key, shmflg, size);
	} else if ((id = ipc_findkey(&shm_ids(ns), key)) == -1) {
		if (!(shmflg & IPC_CREAT))
			err = -ENOENT;
		else
			err = newseg(ns, key, shmflg, size);
	} else if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL)) {
		err = -EEXIST;
	} else {
		shp = shm_lock(ns, id);
		BUG_ON(shp == NULL);
		if (shp->shm_segsz < size)
			err = -EINVAL;
		else if (ipcperms(&shp->shm_perm, shmflg))
			err = -EACCES;
		else {
			int shmid = shm_buildid(ns, id, shp->shm_perm.seq);
			err = security_shm_associate(shp, shmflg);
			if (!err)
				err = shmid;
		}
		shm_unlock(shp);
	}
	mutex_unlock(&shm_ids(ns).mutex);

	return err;
}
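/*
 * Copy a shmid64_ds out to user space, converting to the legacy shmid_ds
 * layout when the caller used the IPC_OLD interface.
 */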
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct shmid_ds out;

		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}
struct shm_setbuf {
	uid_t	uid;
	gid_t	gid;
	mode_t	mode;
};

static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
	{
		struct shmid64_ds tbuf;

		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->uid	= tbuf.shm_perm.uid;
		out->gid	= tbuf.shm_perm.gid;
		out->mode	= tbuf.shm_perm.mode;

		return 0;
	}
	case IPC_OLD:
	{
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid	= tbuf_old.shm_perm.uid;
		out->gid	= tbuf_old.shm_perm.gid;
		out->mode	= tbuf_old.shm_perm.mode;

		return 0;
	}
	default:
		return -EINVAL;
	}
}
static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}
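/*
 * Sum the resident and swapped-out pages of every segment for SHM_INFO.
 * Called with shm_ids.mutex held.
 */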
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
			 unsigned long *swp)
{
	int i;

	*rss = 0;
	*swp = 0;

	for (i = 0; i <= shm_ids(ns).max_id; i++) {
		struct shmid_kernel *shp;
		struct inode *inode;

		shp = shm_get(ns, i);
		if (!shp)
			continue;

		inode = shp->shm_file->f_path.dentry->d_inode;

		if (is_file_hugepages(shp->shm_file)) {
			struct address_space *mapping = inode->i_mapping;
			*rss += (HPAGE_SIZE / PAGE_SIZE) * mapping->nrpages;
		} else {
			struct shmem_inode_info *info = SHMEM_I(inode);
			spin_lock(&info->lock);
			*rss += inode->i_mapping->nrpages;
			*swp += info->swapped;
			spin_unlock(&info->lock);
		}
	}
}
asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
{
	struct shm_setbuf setbuf;
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;
		/* reading an integer is always atomic */
		err = shm_ids(ns).max_id;
		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info, 0, sizeof(shm_info));
		mutex_lock(&shm_ids(ns).mutex);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = shm_ids(ns).max_id;
		mutex_unlock(&shm_ids(ns).mutex);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		memset(&tbuf, 0, sizeof(tbuf));
		shp = shm_lock(ns, shmid);
		if (shp == NULL) {
			err = -EINVAL;
			goto out;
		} else if (cmd == SHM_STAT) {
			err = -EINVAL;
			if (shmid > shm_ids(ns).max_id)
				goto out_unlock;
			result = shm_buildid(ns, shmid, shp->shm_perm.seq);
		} else {
			err = shm_checkid(ns, shp, shmid);
			if (err)
				goto out_unlock;
			result = 0;
		}

		err = -EACCES;
		if (ipcperms(&shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		shm_unlock(shp);
		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		shp = shm_lock(ns, shmid);
		if (shp == NULL) {
			err = -EINVAL;
			goto out;
		}
		err = shm_checkid(ns, shp, shmid);
		if (err)
			goto out_unlock;

		err = audit_ipc_obj(&(shp->shm_perm));
		if (err)
			goto out_unlock;

		if (!capable(CAP_IPC_LOCK)) {
			err = -EPERM;
			if (current->euid != shp->shm_perm.uid &&
			    current->euid != shp->shm_perm.cuid)
				goto out_unlock;
			if (cmd == SHM_LOCK &&
			    !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current->user;
			if (!is_file_hugepages(shp->shm_file)) {
				err = shmem_lock(shp->shm_file, 1, user);
				if (!err) {
					shp->shm_perm.mode |= SHM_LOCKED;
					shp->mlock_user = user;
				}
			}
		} else if (!is_file_hugepages(shp->shm_file)) {
			shmem_lock(shp->shm_file, 0, shp->mlock_user);
			shp->shm_perm.mode &= ~SHM_LOCKED;
			shp->mlock_user = NULL;
		}
		shm_unlock(shp);
		goto out;
	}
	case IPC_RMID:
	{
		/*
		 * We cannot simply remove the file. The SVID states
		 * that the block remains until the last person
		 * detaches from it, then is deleted. A shmat() on
		 * an RMID segment is legal in older Linux and if
		 * we change it apps break...
		 *
		 * Instead we set a destroyed flag, and then blow
		 * the name away when the usage hits zero.
		 */
		mutex_lock(&shm_ids(ns).mutex);
		shp = shm_lock(ns, shmid);
		err = -EINVAL;
		if (shp == NULL)
			goto out_up;
		err = shm_checkid(ns, shp, shmid);
		if (err)
			goto out_unlock_up;

		err = audit_ipc_obj(&(shp->shm_perm));
		if (err)
			goto out_unlock_up;

		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			err = -EPERM;
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		do_shm_rmid(ns, shp);
		mutex_unlock(&shm_ids(ns).mutex);
		goto out;
	}
	case IPC_SET:
	{
		if (copy_shmid_from_user(&setbuf, buf, version)) {
			err = -EFAULT;
			goto out;
		}
		mutex_lock(&shm_ids(ns).mutex);
		shp = shm_lock(ns, shmid);
		err = -EINVAL;
		if (shp == NULL)
			goto out_up;
		err = shm_checkid(ns, shp, shmid);
		if (err)
			goto out_unlock_up;
		err = audit_ipc_obj(&(shp->shm_perm));
		if (err)
			goto out_unlock_up;
		err = audit_ipc_set_perm(0, setbuf.uid, setbuf.gid, setbuf.mode);
		if (err)
			goto out_unlock_up;
		err = -EPERM;
		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		shp->shm_perm.uid = setbuf.uid;
		shp->shm_perm.gid = setbuf.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~S_IRWXUGO)
			| (setbuf.mode & S_IRWXUGO);
		shp->shm_ctim = get_seconds();
		break;
	}

	default:
		err = -EINVAL;
		goto out;
	}

	err = 0;
out_unlock_up:
	shm_unlock(shp);
out_up:
	mutex_unlock(&shm_ids(ns).mutex);
	goto out;
out_unlock:
	shm_unlock(shp);
out:
	return err;
}
/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file *file;
	int err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	unsigned long user_addr;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	mode_t f_mode;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA - 1)) {
			if (shmflg & SHM_RND)
				addr &= ~(SHMLBA - 1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	shp = shm_lock(ns, shmid);
	if (shp == NULL)
		goto out;

	err = shm_checkid(ns, shp, shmid);
	if (err)
		goto out_unlock;

	err = -EACCES;
	if (ipcperms(&shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	path.dentry = dget(shp->shm_file->f_path.dentry);
	path.mnt    = mntget(shp->shm_file->f_path.mnt);
	shp->shm_nattch++;
	size = i_size_read(path.dentry->d_inode);
	shm_unlock(shp);

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd)
		goto out_put_path;

	err = -ENOMEM;
	file = get_empty_filp();
	if (!file)
		goto out_free;

	file->f_op = &shm_file_operations;
	file->private_data = sfd;
	file->f_path = path;
	file->f_mapping = shp->shm_file->f_mapping;
	file->f_mode = f_mode;
	sfd->id = shp->id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = do_mmap(file, addr, size, prot, flags, 0);
	*raddr = user_addr;
	err = 0;
	if (IS_ERR_VALUE(user_addr))
		err = (long)user_addr;
invalid:
	up_write(&current->mm->mmap_sem);
	fput(file);

out_nattch:
	mutex_lock(&shm_ids(ns).mutex);
	shp = shm_lock(ns, shmid);
	BUG_ON(!shp);
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	mutex_unlock(&shm_ids(ns).mutex);

out:
	return err;

out_unlock:
	shm_unlock(shp);
	goto out;

out_free:
	kfree(sfd);
out_put_path:
	dput(path.dentry);
	mntput(path.mnt);
	goto out_nattch;
}
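/*
 * System call wrapper around do_shmat(): the attach address comes back via
 * the kernel-space "raddr" argument and is returned to user space as the
 * (non-error) syscall return value.
 */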
asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}
/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
asmlinkage long sys_shmdt(char __user *shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *next;
	unsigned long addr = (unsigned long)shmaddr;
	loff_t size = 0;
	int retval = -EINVAL;

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
		    (vma->vm_start - addr) / PAGE_SIZE == vma->vm_pgoff) {

			size = vma->vm_file->f_path.dentry->d_inode->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
		    (vma->vm_start - addr) / PAGE_SIZE == vma->vm_pgoff)
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

	up_write(&mm->mmap_sem);
	return retval;
}
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct shmid_kernel *shp = it;
	char *format;

#define SMALL_STRING "%10d %10d %4o %10u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
#define BIG_STRING   "%10d %10d %4o %21u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"

	if (sizeof(size_t) <= sizeof(int))
		format = SMALL_STRING;
	else
		format = BIG_STRING;
	return seq_printf(s, format,
			  shp->shm_perm.key,
			  shp->id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  shp->shm_nattch,
			  shp->shm_perm.uid,
			  shp->shm_perm.gid,
			  shp->shm_perm.cuid,
			  shp->shm_perm.cgid,
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim);
}
#endif