/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Better ipc lock (kern_ipc_perm.lock) handling
 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"

struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_rmid_forced = 0;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rwsem (writer) and the shp structure locked.
 * Only shm_ids.rwsem remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;
	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

static int __init ipc_ns_init(void)
{
	shm_init_ns(&init_ipc_ns);
	return 0;
}

pure_initcall(ipc_ns_init);

void __init shm_init(void)
{
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}
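
/*
 * Usage sketch (userspace, illustrative only -- not part of this file):
 * the proc interface registered above is a plain text file, one row per
 * segment, headed by the line passed to ipc_init_proc_interface().
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[512];
 *		FILE *f = fopen("/proc/sysvipc/shm", "r");
 *
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);
 *		fclose(f);
 *		return 0;
 *	}
 */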

static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rwsem
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
	rcu_read_lock();
	ipc_lock_object(&ipcp->shm_perm);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(file_inode(shp->shm_file)->i_size,
				shp->mlock_user);
	fput(shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return (shp->shm_nattch == 0) &&
	       (ns->shm_rmid_forced ||
		(shp->shm_perm.mode & SHM_DEST));
}
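
/*
 * Illustrative scenario (userspace sketch, not part of this file): a
 * segment marked for removal is only freed once the last attach is gone.
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, 0);
 *
 *	shmctl(id, IPC_RMID, NULL);	// sets SHM_DEST; nattch is still 1
 *	shmdt(p);			// nattch drops to 0 -> shm_destroy()
 *
 * With the sysctl kernel.shm_rmid_forced set to 1, the shmdt() alone
 * would trigger the destroy, even without the IPC_RMID.
 */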

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rwsem);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rwsem);
}

/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_current(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_creator != current)
		return 0;

	/*
	 * Mark it as orphaned to destroy the segment when
	 * kernel.shm_rmid_forced is changed.
	 * It is a no-op if the following shm_may_destroy() returns true.
	 */
	shp->shm_creator = NULL;

	/*
	 * Don't even try to destroy it. If shm_rmid_forced=0 and IPC_RMID
	 * is not set, it shouldn't be deleted here.
	 */
	if (!ns->shm_rmid_forced)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	/*
	 * We want to destroy segments without users and with already
	 * exit'ed originating process.
	 *
	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
	 */
	if (shp->shm_creator != NULL)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
	down_write(&shm_ids(ns).rwsem);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
	up_write(&shm_ids(ns).rwsem);
}

void exit_shm(struct task_struct *task)
{
	struct ipc_namespace *ns = task->nsproxy->ipc_ns;

	if (shm_ids(ns).in_use == 0)
		return;

	/* Destroy all segments this task created that are not mapped anywhere */
	down_write(&shm_ids(ns).rwsem);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
	up_write(&shm_ids(ns).rwsem);
}

static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;

	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif

static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	BUG_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);

	return ret;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static long shm_fallocate(struct file *file, int mode, loff_t offset,
			  loff_t len)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fallocate)
		return -EOPNOTSUPP;
	return sfd->file->f_op->fallocate(file, mode, offset, len);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						  pgoff, flags);
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
#ifndef CONFIG_MMU
	.get_unmapped_area	= shm_get_unmapped_area,
#endif
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

int is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rwsem held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;
	vm_flags_t acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		struct hstate *hs;
		size_t hugesize;

		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
		if (!hs) {
			error = -EINVAL;
			goto no_file;
		}
		hugesize = ALIGN(size, huge_page_size(hs));

		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, hugesize, acctflag,
				  &shp->mlock_user, HUGETLB_SHMFS_INODE,
				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
	} else {
		/*
		 * SHM_NORESERVE is honoured only when overcommit is not
		 * set to OVERCOMMIT_NEVER; strict accounting cannot be
		 * opted out of in that mode, even if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (id < 0) {
		error = id;
		goto no_id;
	}

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	shp->shm_creator = current;

	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file_inode(file)->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;

	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	return error;

no_id:
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}
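
/*
 * Worked example of the accounting in newseg() (illustrative): with
 * 4 KiB pages (PAGE_SHIFT == 12), even a 1-byte request charges one
 * full page against ns->shm_tot:
 *
 *	numpages = (1 + PAGE_SIZE - 1) >> PAGE_SHIFT;	// == 1
 *
 * and creation fails with -ENOSPC once shm_tot + numpages would exceed
 * the shmall limit (ns->shm_ctlall).
 */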

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				  struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops shm_ops;
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_ops.getnew = newseg;
	shm_ops.associate = shm_security;
	shm_ops.more_checks = shm_more_checks;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}
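
/*
 * Usage sketch (userspace, illustrative only), assuming <sys/ipc.h> and
 * <sys/shm.h>; the path passed to ftok() is hypothetical:
 *
 *	key_t key = ftok("/some/path", 42);
 *	int id = shmget(key, 1 << 20, IPC_CREAT | 0600);
 *
 * A second shmget() with the same key and a size no larger than the
 * existing segment returns the same id (see shm_more_checks() above);
 * asking for a larger size fails with -EINVAL.
 */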

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
	unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = file_inode(shp->shm_file);

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);
		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);
		spin_lock(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock(&info->lock);
#else
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}

/*
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		shm_add_rss_swap(shp, rss, swp);

		total++;
	}
}

/*
 * This function handles some shmctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct shmid64_ds shmid64;
	struct shmid_kernel *shp;
	int err;

	if (cmd == IPC_SET) {
		if (copy_shmid_from_user(&shmid64, buf, version))
			return -EFAULT;
	}

	down_write(&shm_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
				      &shmid64.shm_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		ipc_lock_object(&shp->shm_perm);
		/* do_shm_rmid unlocks the ipc object and rcu */
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_lock_object(&shp->shm_perm);
		err = ipc_update_perm(&shmid64.shm_perm, ipcp);
		if (err)
			goto out_unlock0;
		shp->shm_ctim = get_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&shm_ids(ns).rwsem);
	return err;
}

static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
			 int cmd, int version, void __user *buf)
{
	int err;
	struct shmid_kernel *shp;

	/* preliminary security checks for *_INFO */
	if (cmd == IPC_INFO || cmd == SHM_INFO) {
		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;
	}

	switch (cmd) {
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;
		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rwsem);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);

		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rwsem);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		rcu_read_lock();
		if (cmd == SHM_STAT) {
			shp = shm_obtain_object(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_obtain_object_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = 0;
		}

		err = -EACCES;
		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
			goto out_unlock;

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		rcu_read_unlock();

		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	default:
		return -EINVAL;
	}

out_unlock:
	rcu_read_unlock();
out:
	return err;
}

SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SHM_INFO:
	case SHM_STAT:
	case IPC_STAT:
		return shmctl_nolock(ns, shmid, cmd, version, buf);
	case IPC_RMID:
	case IPC_SET:
		return shmctl_down(ns, shmid, cmd, buf, version);
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		struct file *shm_file;

		rcu_read_lock();
		shp = shm_obtain_object_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_unlock1;
		}

		audit_ipc_obj(&(shp->shm_perm));
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock1;

		ipc_lock_object(&shp->shm_perm);
		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
			kuid_t euid = current_euid();
			err = -EPERM;
			if (!uid_eq(euid, shp->shm_perm.uid) &&
			    !uid_eq(euid, shp->shm_perm.cuid))
				goto out_unlock0;
			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
				goto out_unlock0;
		}

		shm_file = shp->shm_file;
		if (is_file_hugepages(shm_file))
			goto out_unlock0;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current_user();
			err = shmem_lock(shm_file, 1, user);
			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
				shp->shm_perm.mode |= SHM_LOCKED;
				shp->mlock_user = user;
			}
			goto out_unlock0;
		}

		/* SHM_UNLOCK */
		if (!(shp->shm_perm.mode & SHM_LOCKED))
			goto out_unlock0;
		shmem_lock(shm_file, 0, shp->mlock_user);
		shp->shm_perm.mode &= ~SHM_LOCKED;
		shp->mlock_user = NULL;
		get_file(shm_file);
		ipc_unlock_object(&shp->shm_perm);
		rcu_read_unlock();
		shmem_unlock_mapping(shm_file->f_mapping);

		fput(shm_file);
		return err;
	}
	default:
		return -EINVAL;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
	return err;
}
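
/*
 * Usage sketch (userspace, illustrative only): querying and locking a
 * segment through the syscall above.
 *
 *	struct shmid_ds ds;
 *
 *	if (shmctl(id, IPC_STAT, &ds) == 0)
 *		printf("size=%lu nattch=%lu\n",
 *		       (unsigned long)ds.shm_segsz,
 *		       (unsigned long)ds.shm_nattch);
 *
 *	shmctl(id, SHM_LOCK, NULL);	// needs CAP_IPC_LOCK or a
 *					// sufficient RLIMIT_MEMLOCK
 *	shmctl(id, SHM_UNLOCK, NULL);
 */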

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
	      unsigned long shmlba)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file *file;
	int err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;
	unsigned long populate = 0;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (shmlba - 1)) {
			if (shmflg & SHM_RND)
				addr &= ~(shmlba - 1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	rcu_read_lock();
	shp = shm_obtain_object_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out_unlock;
	}

	err = -EACCES;
	if (ipcperms(ns, &shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	ipc_lock_object(&shp->shm_perm);
	path = shp->shm_file->f_path;
	path_get(&path);
	shp->shm_nattch++;
	size = i_size_read(path.dentry->d_inode);
	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd) {
		path_put(&path);
		goto out_nattch;
	}

	file = alloc_file(&path, f_mode,
			  is_file_hugepages(shp->shm_file) ?
				&shm_file_operations_huge :
				&shm_file_operations);
	err = PTR_ERR(file);
	if (IS_ERR(file)) {
		kfree(sfd);
		path_put(&path);
		goto out_nattch;
	}

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	err = security_mmap_file(file, prot, flags);
	if (err)
		goto out_fput;

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
	*raddr = addr;
	err = 0;
	if (IS_ERR_VALUE(addr))
		err = (long)addr;
invalid:
	up_write(&current->mm->mmap_sem);
	if (populate)
		mm_populate(addr, populate);

out_fput:
	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rwsem);
	shp = shm_lock(ns, shmid);
	BUG_ON(IS_ERR(shp));
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rwsem);
	return err;

out_unlock:
	rcu_read_unlock();
out:
	return err;
}

SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}
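
/*
 * Usage sketch (userspace, illustrative only): the flags handled by
 * do_shmat() above, seen from the caller's side.
 *
 *	void *p;
 *
 *	p = shmat(id, NULL, 0);			// kernel picks the address
 *	p = shmat(id, NULL, SHM_RDONLY);	// PROT_READ mapping only
 *	p = shmat(id, hint, SHM_RND);		// hint rounded down to SHMLBA
 *
 *	if (p == (void *)-1)
 *		perror("shmat");
 */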

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct vm_area_struct *next;
#endif

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			size = file_inode(vma->vm_file)->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

#else /* CONFIG_MMU */
	/* under NOMMU conditions, the exact address to be destroyed must be
	 * given */
	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		retval = 0;
	}

#endif

	up_write(&mm->mmap_sem);
	return retval;
}
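
/*
 * Usage sketch (userspace, illustrative only): shmdt() must be given the
 * address returned by shmat(); anything that does not start a shm vma
 * fails with -EINVAL, per the vm_pgoff check above.
 *
 *	void *p = shmat(id, NULL, 0);
 *
 *	if (shmdt(p) != 0)
 *		perror("shmdt");
 */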

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct shmid_kernel *shp = it;
	unsigned long rss = 0, swp = 0;

	shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	return seq_printf(s,
			  "%10d %10d %4o " SIZE_SPEC " %5u %5u "
			  "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
			  SIZE_SPEC " " SIZE_SPEC "\n",
			  shp->shm_perm.key,
			  shp->shm_perm.id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  shp->shm_nattch,
			  from_kuid_munged(user_ns, shp->shm_perm.uid),
			  from_kgid_munged(user_ns, shp->shm_perm.gid),
			  from_kuid_munged(user_ns, shp->shm_perm.cuid),
			  from_kgid_munged(user_ns, shp->shm_perm.cgid),
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim,
			  rss * PAGE_SIZE,
			  swp * PAGE_SIZE);
}
#endif