/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"

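/*
 * Per-attach state, kept in file->private_data of the file created at
 * attach time: the segment id, a namespace reference, the backing
 * shmem/hugetlbfs file, and that file's vm_ops so shm_vm_ops can wrap them.
 */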
struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_rmid_forced = 0;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
 * Only shm_ids.rw_mutex remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;
	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

static int __init ipc_ns_init(void)
{
	shm_init_ns(&init_ipc_ns);
	return 0;
}

pure_initcall(ipc_ns_init);

void __init shm_init(void)
{
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}

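/*
 * Lookup helpers that return the shmid_kernel without taking its
 * spinlock. They must be called inside an RCU read-side critical
 * section; unlike shm_lock() below, locking is left to the caller.
 */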
static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rw_mutex
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
	rcu_read_lock();
	ipc_lock_object(&ipcp->shm_perm);
}

static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(file_inode(shp->shm_file)->i_size,
				shp->mlock_user);
	fput(shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return (shp->shm_nattch == 0) &&
	       (ns->shm_rmid_forced ||
		(shp->shm_perm.mode & SHM_DEST));
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rw_mutex);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);
}

/* Called with ns->shm_ids(ns).rw_mutex locked */
static int shm_try_destroy_current(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_creator != current)
		return 0;

	/*
	 * Mark it as orphaned to destroy the segment when
	 * kernel.shm_rmid_forced is changed.
	 * This is a no-op if the following shm_may_destroy() returns true.
	 */
	shp->shm_creator = NULL;

	/*
	 * Don't even try to destroy it. If shm_rmid_forced=0 and IPC_RMID
	 * is not set, it shouldn't be deleted here.
	 */
	if (!ns->shm_rmid_forced)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

/* Called with ns->shm_ids(ns).rw_mutex locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	/*
	 * We want to destroy segments that have no users and whose
	 * originating process has already exited.
	 *
	 * As shp->* are changed under rw_mutex, it's safe to skip shp locking.
	 */
	if (shp->shm_creator != NULL)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
	down_write(&shm_ids(ns).rw_mutex);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
	up_write(&shm_ids(ns).rw_mutex);
}

void exit_shm(struct task_struct *task)
{
	struct ipc_namespace *ns = task->nsproxy->ipc_ns;

	if (shm_ids(ns).in_use == 0)
		return;

	/* Destroy all segments this task created but that nobody has mapped */
	down_write(&shm_ids(ns).rw_mutex);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
	up_write(&shm_ids(ns).rw_mutex);
}

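/*
 * Fault handling is simply delegated to the vm_ops of the backing
 * shmem (or hugetlbfs) file that were saved at mmap time.
 */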
static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;

	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif

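/*
 * mmap the underlying shmem/hugetlb file, then swap the vma's vm_ops for
 * shm_vm_ops (keeping the original ones in sfd->vm_ops) so that the
 * open/close callbacks above can maintain shm_nattch and the IPC times.
 */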
static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	BUG_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);

	return ret;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static long shm_fallocate(struct file *file, int mode, loff_t offset,
			  loff_t len)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fallocate)
		return -EOPNOTSUPP;
	return sfd->file->f_op->fallocate(file, mode, offset, len);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						  pgoff, flags);
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
#ifndef CONFIG_MMU
	.get_unmapped_area	= shm_get_unmapped_area,
#endif
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

int is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rw_mutex held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;
	vm_flags_t acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		struct hstate *hs;
		size_t hugesize;

		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
		if (!hs) {
			error = -EINVAL;
			goto no_file;
		}
		hugesize = ALIGN(size, huge_page_size(hs));

		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, hugesize, acctflag,
				&shp->mlock_user, HUGETLB_SHMFS_INODE,
				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
	} else {
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (id < 0) {
		error = id;
		goto no_id;
	}

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	shp->shm_creator = current;

	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file_inode(file)->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;

	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	return error;

no_id:
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops shm_ops;
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_ops.getnew = newseg;
	shm_ops.associate = shm_security;
	shm_ops.more_checks = shm_more_checks;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}

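/*
 * A minimal userspace sketch of the syscalls implemented in this file
 * (illustrative only, not part of the kernel source; error handling
 * trimmed):
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);	// attach: shm_nattch becomes 1
 *	p[0] = 42;
 *	shmdt(p);			// detach: shm_nattch drops back to 0
 *	shmctl(id, IPC_RMID, NULL);	// mark the segment for destruction
 */
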
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
	unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = file_inode(shp->shm_file);

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);
		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);
		spin_lock(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock(&info->lock);
#else
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}

/*
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		shm_add_rss_swap(shp, rss, swp);

		total++;
	}
}

/*
 * This function handles some shmctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct shmid64_ds shmid64;
	struct shmid_kernel *shp;
	int err;

	if (cmd == IPC_SET) {
		if (copy_shmid_from_user(&shmid64, buf, version))
			return -EFAULT;
	}

	down_write(&shm_ids(ns).rw_mutex);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
				      &shmid64.shm_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		ipc_lock_object(&shp->shm_perm);
		/* do_shm_rmid unlocks the ipc object and rcu */
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_lock_object(&shp->shm_perm);
		err = ipc_update_perm(&shmid64.shm_perm, ipcp);
		if (err)
			goto out_unlock0;
		shp->shm_ctim = get_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&shm_ids(ns).rw_mutex);
	return err;
}

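/*
 * Command dispatch for shmctl(2): IPC_INFO/SHM_INFO report namespace-wide
 * limits and usage under the read lock, IPC_STAT/SHM_STAT copy out one
 * segment's state, SHM_LOCK/SHM_UNLOCK pin or unpin the backing pages,
 * and IPC_RMID/IPC_SET are routed through shmctl_down() above.
 */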
SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rw_mutex);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);

		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rw_mutex);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		if (cmd == SHM_STAT) {
			shp = shm_lock(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_lock_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = 0;
		}
		err = -EACCES;
		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;
		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		shm_unlock(shp);
		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		struct file *shm_file;

		shp = shm_lock_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out;
		}

		audit_ipc_obj(&(shp->shm_perm));

		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
			kuid_t euid = current_euid();
			err = -EPERM;
			if (!uid_eq(euid, shp->shm_perm.uid) &&
			    !uid_eq(euid, shp->shm_perm.cuid))
				goto out_unlock;
			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		shm_file = shp->shm_file;
		if (is_file_hugepages(shm_file))
			goto out_unlock;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current_user();
			err = shmem_lock(shm_file, 1, user);
			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
				shp->shm_perm.mode |= SHM_LOCKED;
				shp->mlock_user = user;
			}
			goto out_unlock;
		}

		/* SHM_UNLOCK */
		if (!(shp->shm_perm.mode & SHM_LOCKED))
			goto out_unlock;
		shmem_lock(shm_file, 0, shp->mlock_user);
		shp->shm_perm.mode &= ~SHM_LOCKED;
		shp->mlock_user = NULL;
		get_file(shm_file);
		shm_unlock(shp);
		shmem_unlock_mapping(shm_file->f_mapping);
		fput(shm_file);
		goto out;
	}
	case IPC_RMID:
	case IPC_SET:
		err = shmctl_down(ns, shmid, cmd, buf, version);
		return err;
	default:
		return -EINVAL;
	}

out_unlock:
	shm_unlock(shp);
out:
	return err;
}

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
	      unsigned long shmlba)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file *file;
	int err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;
	unsigned long populate = 0;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (shmlba - 1)) {
			if (shmflg & SHM_RND)
				addr &= ~(shmlba - 1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	shp = shm_lock_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out;
	}

	err = -EACCES;
	if (ipcperms(ns, &shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	path = shp->shm_file->f_path;
	path_get(&path);
	shp->shm_nattch++;
	size = i_size_read(path.dentry->d_inode);
	shm_unlock(shp);

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd)
		goto out_put_dentry;

	file = alloc_file(&path, f_mode,
			  is_file_hugepages(shp->shm_file) ?
				&shm_file_operations_huge :
				&shm_file_operations);
	err = PTR_ERR(file);
	if (IS_ERR(file))
		goto out_free;

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	err = security_mmap_file(file, prot, flags);
	if (err)
		goto out_fput;

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
	*raddr = addr;
	err = 0;
	if (IS_ERR_VALUE(addr))
		err = (long)addr;
invalid:
	up_write(&current->mm->mmap_sem);
	if (populate)
		mm_populate(addr, populate);

out_fput:
	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rw_mutex);
	shp = shm_lock(ns, shmid);
	BUG_ON(IS_ERR(shp));
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);
out:
	return err;

out_unlock:
	shm_unlock(shp);
	goto out;

out_free:
	kfree(sfd);
out_put_dentry:
	path_put(&path);
	goto out_nattch;
}

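/*
 * The attach address returned by do_shmat() can legitimately look like a
 * negative errno value, so the wrapper tells the architecture code not to
 * treat it as a failed syscall before handing it back to userspace.
 */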
SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct vm_area_struct *next;
#endif

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or
		 * otherwise it starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			size = file_inode(vma->vm_file)->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

#else /* CONFIG_MMU */
	/* under NOMMU conditions, the exact address to be destroyed must be
	 * given */
	retval = -EINVAL;
	if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		retval = 0;
	}

#endif

	up_write(&mm->mmap_sem);
	return retval;
}

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct shmid_kernel *shp = it;
	unsigned long rss = 0, swp = 0;

	shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	return seq_printf(s,
			  "%10d %10d %4o " SIZE_SPEC " %5u %5u "
			  "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
			  SIZE_SPEC " " SIZE_SPEC "\n",
			  shp->shm_perm.key,
			  shp->shm_perm.id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  shp->shm_nattch,
			  from_kuid_munged(user_ns, shp->shm_perm.uid),
			  from_kgid_munged(user_ns, shp->shm_perm.gid),
			  from_kuid_munged(user_ns, shp->shm_perm.cuid),
			  from_kgid_munged(user_ns, shp->shm_perm.cgid),
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim,
			  rss * PAGE_SIZE,
			  swp * PAGE_SIZE);
}
#endif