/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Better ipc lock (kern_ipc_perm.lock) handling
 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>
#include <asm/uaccess.h>

#include "util.h"
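
/*
 * Per-attach bookkeeping, hung off the private_data of the file that
 * shmat() installs into the caller's address space.  It remembers the
 * segment id, the owning ipc namespace, the underlying shmem (or
 * hugetlbfs) file, and that file's vm_operations so that shm_vm_ops
 * can delegate faults and NUMA policy calls to it.
 */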
struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif
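
/*
 * Seed a fresh ipc namespace with the compile-time SysV shm defaults;
 * the limits are per-namespace and can be changed later via sysctl.
 */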
void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_rmid_forced = 0;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
 * Only shm_ids.rw_mutex remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

static int __init ipc_ns_init(void)
{
	shm_init_ns(&init_ipc_ns);
	return 0;
}

pure_initcall(ipc_ns_init);

void __init shm_init(void)
{
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}

static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rw_mutex
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
	rcu_read_lock();
	ipc_lock_object(&ipcp->shm_perm);
}

static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(file_inode(shp->shm_file)->i_size,
				shp->mlock_user);
	fput(shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return (shp->shm_nattch == 0) &&
	       (ns->shm_rmid_forced ||
		(shp->shm_perm.mode & SHM_DEST));
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rw_mutex);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);
}

/* Called with ns->shm_ids(ns).rw_mutex locked */
static int shm_try_destroy_current(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_creator != current)
		return 0;

	/*
	 * Mark it as orphaned to destroy the segment when
	 * kernel.shm_rmid_forced is changed.
	 * It is a noop if the following shm_may_destroy() returns true.
	 */
	shp->shm_creator = NULL;

	/*
	 * Don't even try to destroy it.  If shm_rmid_forced=0 and IPC_RMID
	 * is not set, it shouldn't be deleted here.
	 */
	if (!ns->shm_rmid_forced)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

/* Called with ns->shm_ids(ns).rw_mutex locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	/*
	 * We want to destroy segments without users and whose originating
	 * process has already exited.
	 *
	 * As shp->* are changed under rw_mutex, it's safe to skip shp locking.
	 */
	if (shp->shm_creator != NULL)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
	down_write(&shm_ids(ns).rw_mutex);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
	up_write(&shm_ids(ns).rw_mutex);
}
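
/*
 * Called from do_exit(): drop the exiting task's "creator" reference on
 * every segment it created and, when kernel.shm_rmid_forced is set,
 * destroy the ones that have no attaches left.
 */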
void exit_shm(struct task_struct *task)
{
	struct ipc_namespace *ns = task->nsproxy->ipc_ns;

	if (shm_ids(ns).in_use == 0)
		return;

	/* Destroy all segments this task created that are no longer mapped */
	down_write(&shm_ids(ns).rw_mutex);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
	up_write(&shm_ids(ns).rw_mutex);
}
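
/*
 * Fault handling is delegated to the backing shmem (or hugetlbfs) file's
 * vm_operations, which shm_mmap() stashed in the shm_file_data.
 */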
static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;

	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif
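
/*
 * mmap the underlying file, then interpose: remember the vm_ops the
 * backing filesystem installed and replace them with shm_vm_ops, whose
 * open/close callbacks keep shm_nattch accurate while everything else
 * is forwarded to the saved ops.
 */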
static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	BUG_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);

	return ret;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static long shm_fallocate(struct file *file, int mode, loff_t offset,
			  loff_t len)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fallocate)
		return -EOPNOTSUPP;
	return sfd->file->f_op->fallocate(file, mode, offset, len);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						  pgoff, flags);
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
#ifndef CONFIG_MMU
	.get_unmapped_area	= shm_get_unmapped_area,
#endif
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};
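
/*
 * The hugetlb variant supplies get_unmapped_area unconditionally so
 * that mappings are laid out with the huge-page alignment hugetlbfs
 * requires, even on MMU kernels.
 */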
static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

int is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rw_mutex held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;
	vm_flags_t acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		struct hstate *hs;
		size_t hugesize;

		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
		if (!hs) {
			error = -EINVAL;
			goto no_file;
		}
		hugesize = ALIGN(size, huge_page_size(hs));

		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, hugesize, acctflag,
				  &shp->mlock_user, HUGETLB_SHMFS_INODE,
				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
	} else {
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (id < 0) {
		error = id;
		goto no_id;
	}

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	shp->shm_creator = current;

	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file_inode(file)->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;

	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	return error;

no_id:
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}
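
/*
 * shmget() itself only packages the arguments: ipcget() performs the
 * key lookup and either calls newseg() to create a segment or runs the
 * associate/more_checks callbacks against an existing one.
 */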
SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops shm_ops;
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_ops.getnew = newseg;
	shm_ops.associate = shm_security;
	shm_ops.more_checks = shm_more_checks;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
	unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = file_inode(shp->shm_file);

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);
		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);
		spin_lock(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock(&info->lock);
#else
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}

/*
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		shm_add_rss_swap(shp, rss, swp);

		total++;
	}
}

/*
 * This function handles some shmctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct shmid64_ds shmid64;
	struct shmid_kernel *shp;
	int err;

	if (cmd == IPC_SET) {
		if (copy_shmid_from_user(&shmid64, buf, version))
			return -EFAULT;
	}

	down_write(&shm_ids(ns).rw_mutex);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
				      &shmid64.shm_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		ipc_lock_object(&shp->shm_perm);
		/* do_shm_rmid unlocks the ipc object and rcu */
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_lock_object(&shp->shm_perm);
		err = ipc_update_perm(&shmid64.shm_perm, ipcp);
		if (err)
			goto out_unlock0;
		shp->shm_ctim = get_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&shm_ids(ns).rw_mutex);
	return err;
}
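
/*
 * Handles the shmctl commands that do not modify the segment
 * (IPC_INFO, SHM_INFO, SHM_STAT, IPC_STAT); only RCU and, for the
 * *_INFO counters, the rw_mutex taken as a reader are needed.
 */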
static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
			 int cmd, int version, void __user *buf)
{
	int err;
	struct shmid_kernel *shp;

	/* preliminary security checks for *_INFO */
	if (cmd == IPC_INFO || cmd == SHM_INFO) {
		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;
	}

	switch (cmd) {
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rw_mutex);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);

		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rw_mutex);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		rcu_read_lock();
		if (cmd == SHM_STAT) {
			shp = shm_obtain_object(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_obtain_object_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = 0;
		}

		err = -EACCES;
		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
			goto out_unlock;

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		rcu_read_unlock();

		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	default:
		return -EINVAL;
	}

out_unlock:
	rcu_read_unlock();
out:
	return err;
}

SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SHM_INFO:
	case SHM_STAT:
	case IPC_STAT:
		return shmctl_nolock(ns, shmid, cmd, version, buf);
	case IPC_RMID:
	case IPC_SET:
		return shmctl_down(ns, shmid, cmd, buf, version);
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		struct file *shm_file;

		rcu_read_lock();
		shp = shm_obtain_object_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_unlock1;
		}

		audit_ipc_obj(&(shp->shm_perm));
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock1;

		ipc_lock_object(&shp->shm_perm);
		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
			kuid_t euid = current_euid();
			err = -EPERM;
			if (!uid_eq(euid, shp->shm_perm.uid) &&
			    !uid_eq(euid, shp->shm_perm.cuid))
				goto out_unlock0;
			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
				goto out_unlock0;
		}

		shm_file = shp->shm_file;
		if (is_file_hugepages(shm_file))
			goto out_unlock0;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current_user();
			err = shmem_lock(shm_file, 1, user);
			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
				shp->shm_perm.mode |= SHM_LOCKED;
				shp->mlock_user = user;
			}
			goto out_unlock0;
		}

		/* SHM_UNLOCK */
		if (!(shp->shm_perm.mode & SHM_LOCKED))
			goto out_unlock0;
		shmem_lock(shm_file, 0, shp->mlock_user);
		shp->shm_perm.mode &= ~SHM_LOCKED;
		shp->mlock_user = NULL;
		get_file(shm_file);
		ipc_unlock_object(&shp->shm_perm);
		rcu_read_unlock();
		shmem_unlock_mapping(shm_file->f_mapping);

		fput(shm_file);
		return err;
	}
	default:
		return -EINVAL;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
	return err;
}

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
	      unsigned long shmlba)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file *file;
	int err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;
	unsigned long populate = 0;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (shmlba - 1)) {
			if (shmflg & SHM_RND)
				addr &= ~(shmlba - 1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	rcu_read_lock();
	shp = shm_obtain_object_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out_unlock;
	}

	err = -EACCES;
	if (ipcperms(ns, &shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	ipc_lock_object(&shp->shm_perm);
	path = shp->shm_file->f_path;
	path_get(&path);
	shp->shm_nattch++;
	size = i_size_read(path.dentry->d_inode);
	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd) {
		path_put(&path);
		goto out_nattch;
	}

	file = alloc_file(&path, f_mode,
			  is_file_hugepages(shp->shm_file) ?
				&shm_file_operations_huge :
				&shm_file_operations);
	err = PTR_ERR(file);
	if (IS_ERR(file)) {
		kfree(sfd);
		path_put(&path);
		goto out_nattch;
	}

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	err = security_mmap_file(file, prot, flags);
	if (err)
		goto out_fput;

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
	*raddr = addr;
	err = 0;
	if (IS_ERR_VALUE(addr))
		err = (long)addr;
invalid:
	up_write(&current->mm->mmap_sem);
	if (populate)
		mm_populate(addr, populate);

out_fput:
	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rw_mutex);
	shp = shm_lock(ns, shmid);
	BUG_ON(IS_ERR(shp));
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);
	return err;

out_unlock:
	rcu_read_unlock();
out:
	return err;
}
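
/*
 * The attach address can be a large unsigned value that would look like
 * a negative errno, so tell the arch syscall-exit path not to treat the
 * return value as an error.
 */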
SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct vm_area_struct *next;
#endif

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			size = file_inode(vma->vm_file)->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

#else /* CONFIG_MMU */
	/* under NOMMU conditions, the exact address to be destroyed must be
	 * given */
	retval = -EINVAL;
	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		retval = 0;
	}

#endif

	up_write(&mm->mmap_sem);
	return retval;
}

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct shmid_kernel *shp = it;
	unsigned long rss = 0, swp = 0;

	shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	return seq_printf(s,
			  "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
			  "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
			  SIZE_SPEC " " SIZE_SPEC "\n",
			  shp->shm_perm.key,
			  shp->shm_perm.id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  shp->shm_nattch,
			  from_kuid_munged(user_ns, shp->shm_perm.uid),
			  from_kgid_munged(user_ns, shp->shm_perm.gid),
			  from_kuid_munged(user_ns, shp->shm_perm.cuid),
			  from_kgid_munged(user_ns, shp->shm_perm.cgid),
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim,
			  rss * PAGE_SIZE,
			  swp * PAGE_SIZE);
}
#endif