/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 * Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Better ipc lock (kern_ipc_perm.lock) handling
 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"
struct shm_file_data {
        int id;
        struct ipc_namespace *ns;
        struct file *file;
        const struct vm_operations_struct *vm_ops;
};

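/*
 * Editorial note (not in the original source): the macro below casts
 * &file->private_data so that shm_file_data(file) works both as an
 * rvalue and as an lvalue -- see "shm_file_data(file) = NULL" in
 * shm_release() further down.
 */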
#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns) ((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp) \
        ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
        ns->shm_ctlmax = SHMMAX;
        ns->shm_ctlall = SHMALL;
        ns->shm_ctlmni = SHMMNI;
        ns->shm_rmid_forced = 0;
        ns->shm_tot = 0;
        ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rwsem (writer) and the shp structure locked.
 * Only shm_ids.rwsem remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
        struct shmid_kernel *shp;
        shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        if (shp->shm_nattch) {
                shp->shm_perm.mode |= SHM_DEST;
                /* Do not find it any more */
                shp->shm_perm.key = IPC_PRIVATE;
                shm_unlock(shp);
        } else
                shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
        free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
        idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

static int __init ipc_ns_init(void)
{
        shm_init_ns(&init_ipc_ns);
        return 0;
}

pure_initcall(ipc_ns_init);
void __init shm_init(void)
{
        ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
                                "       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
                                "       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
                                IPC_SHM_IDS, sysvipc_shm_proc_show);
}
static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return ERR_CAST(ipcp);

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return ERR_CAST(ipcp);

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rwsem
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return (struct shmid_kernel *)ipcp;

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
        rcu_read_lock();
        ipc_lock_object(&ipcp->shm_perm);
}

static void shm_rcu_free(struct rcu_head *head)
{
        struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
        struct shmid_kernel *shp = ipc_rcu_to_struct(p);

        security_shm_free(shp);
        ipc_rcu_free(head);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
        ipc_rmid(&shm_ids(ns), &s->shm_perm);
}
/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;

        shp = shm_lock(sfd->ns, sfd->id);
        BUG_ON(IS_ERR(shp));
        shp->shm_atim = get_seconds();
        shp->shm_lprid = task_tgid_vnr(current);
        shp->shm_nattch++;
        shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
        ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
        shm_rmid(ns, shp);
        shm_unlock(shp);
        if (!is_file_hugepages(shp->shm_file))
                shmem_lock(shp->shm_file, 0, shp->mlock_user);
        else if (shp->mlock_user)
                user_shm_unlock(file_inode(shp->shm_file)->i_size,
                                shp->mlock_user);
        fput(shp->shm_file);
        ipc_rcu_putref(shp, shm_rcu_free);
}
/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
        return (shp->shm_nattch == 0) &&
               (ns->shm_rmid_forced ||
                (shp->shm_perm.mode & SHM_DEST));
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;
        struct ipc_namespace *ns = sfd->ns;

        down_write(&shm_ids(ns).rwsem);
        /* remove from the list of attaches of the shm segment */
        shp = shm_lock(ns, sfd->id);
        BUG_ON(IS_ERR(shp));
        shp->shm_lprid = task_tgid_vnr(current);
        shp->shm_dtim = get_seconds();
        shp->shm_nattch--;
        if (shm_may_destroy(ns, shp))
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
        up_write(&shm_ids(ns).rwsem);
}
/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_current(int id, void *p, void *data)
{
        struct ipc_namespace *ns = data;
        struct kern_ipc_perm *ipcp = p;
        struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        if (shp->shm_creator != current)
                return 0;

        /*
         * Mark it as orphaned to destroy the segment when
         * kernel.shm_rmid_forced is changed.
         * It is a noop if the following shm_may_destroy() returns true.
         */
        shp->shm_creator = NULL;

        /*
         * Don't even try to destroy it. If shm_rmid_forced=0 and IPC_RMID
         * is not set, it shouldn't be deleted here.
         */
        if (!ns->shm_rmid_forced)
                return 0;

        if (shm_may_destroy(ns, shp)) {
                shm_lock_by_ptr(shp);
                shm_destroy(ns, shp);
        }
        return 0;
}
/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
        struct ipc_namespace *ns = data;
        struct kern_ipc_perm *ipcp = p;
        struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        /*
         * We want to destroy segments without users and with an already
         * exited originating process.
         *
         * As shp->* are changed under rwsem, it's safe to skip shp locking.
         */
        if (shp->shm_creator != NULL)
                return 0;

        if (shm_may_destroy(ns, shp)) {
                shm_lock_by_ptr(shp);
                shm_destroy(ns, shp);
        }
        return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
        down_write(&shm_ids(ns).rwsem);
        if (shm_ids(ns).in_use)
                idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
        up_write(&shm_ids(ns).rwsem);
}

void exit_shm(struct task_struct *task)
{
        struct ipc_namespace *ns = task->nsproxy->ipc_ns;

        if (shm_ids(ns).in_use == 0)
                return;

        /* Destroy all segments that were created but are not mapped yet */
        down_write(&shm_ids(ns).rwsem);
        if (shm_ids(ns).in_use)
                idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
        up_write(&shm_ids(ns).rwsem);
}
static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);

        return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        int err = 0;

        if (sfd->vm_ops->set_policy)
                err = sfd->vm_ops->set_policy(vma, new);
        return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
                                        unsigned long addr)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct mempolicy *pol = NULL;

        if (sfd->vm_ops->get_policy)
                pol = sfd->vm_ops->get_policy(vma, addr);
        else if (vma->vm_policy)
                pol = vma->vm_policy;

        return pol;
}
#endif

static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct shm_file_data *sfd = shm_file_data(file);
        int ret;

        ret = sfd->file->f_op->mmap(sfd->file, vma);
        if (ret != 0)
                return ret;
        sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
        BUG_ON(!sfd->vm_ops->fault);
#endif
        vma->vm_ops = &shm_vm_ops;
        shm_open(vma);

        return ret;
}
static int shm_release(struct inode *ino, struct file *file)
{
        struct shm_file_data *sfd = shm_file_data(file);

        put_ipc_ns(sfd->ns);
        shm_file_data(file) = NULL;
        kfree(sfd);
        return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct shm_file_data *sfd = shm_file_data(file);

        if (!sfd->file->f_op->fsync)
                return -EINVAL;
        return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static long shm_fallocate(struct file *file, int mode, loff_t offset,
                          loff_t len)
{
        struct shm_file_data *sfd = shm_file_data(file);

        if (!sfd->file->f_op->fallocate)
                return -EOPNOTSUPP;
        return sfd->file->f_op->fallocate(file, mode, offset, len);
}

static unsigned long shm_get_unmapped_area(struct file *file,
        unsigned long addr, unsigned long len, unsigned long pgoff,
        unsigned long flags)
{
        struct shm_file_data *sfd = shm_file_data(file);

        return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
                                                  pgoff, flags);
}

static const struct file_operations shm_file_operations = {
        .mmap = shm_mmap,
        .fsync = shm_fsync,
        .release = shm_release,
#ifndef CONFIG_MMU
        .get_unmapped_area = shm_get_unmapped_area,
#endif
        .llseek = noop_llseek,
        .fallocate = shm_fallocate,
};

static const struct file_operations shm_file_operations_huge = {
        .mmap = shm_mmap,
        .fsync = shm_fsync,
        .release = shm_release,
        .get_unmapped_area = shm_get_unmapped_area,
        .llseek = noop_llseek,
        .fallocate = shm_fallocate,
};

int is_file_shm_hugepages(struct file *file)
{
        return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
        .open = shm_open,   /* callback for a new vm-area open */
        .close = shm_close, /* callback for when the vm-area is released */
        .fault = shm_fault,
#if defined(CONFIG_NUMA)
        .set_policy = shm_set_policy,
        .get_policy = shm_get_policy,
#endif
};
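/*
 * Editorial note (not in the original source): the hooks above implement
 * a delegation pattern. The file handed to userspace wraps the real
 * shmem/hugetlbfs file (sfd->file); shm_mmap() forwards to the backing
 * file's f_op->mmap(), stashes the backing vm_ops in sfd->vm_ops, and
 * re-points the vma at shm_vm_ops so that attach/detach accounting
 * (shm_open/shm_close) observes every fork and unmap while faults and
 * NUMA policy queries still reach the backing implementation.
 */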
/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rwsem held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
        key_t key = params->key;
        int shmflg = params->flg;
        size_t size = params->u.size;
        int error;
        struct shmid_kernel *shp;
        size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        struct file *file;
        char name[13];
        int id;
        vm_flags_t acctflag = 0;

        if (size < SHMMIN || size > ns->shm_ctlmax)
                return -EINVAL;

        if (ns->shm_tot + numpages > ns->shm_ctlall)
                return -ENOSPC;

        shp = ipc_rcu_alloc(sizeof(*shp));
        if (!shp)
                return -ENOMEM;

        shp->shm_perm.key = key;
        shp->shm_perm.mode = (shmflg & S_IRWXUGO);
        shp->mlock_user = NULL;

        shp->shm_perm.security = NULL;
        error = security_shm_alloc(shp);
        if (error) {
                ipc_rcu_putref(shp, ipc_rcu_free);
                return error;
        }

        sprintf(name, "SYSV%08x", key);
        if (shmflg & SHM_HUGETLB) {
                struct hstate *hs;
                size_t hugesize;

                hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
                if (!hs) {
                        error = -EINVAL;
                        goto no_file;
                }
                hugesize = ALIGN(size, huge_page_size(hs));

                /* hugetlb_file_setup applies strict accounting */
                if (shmflg & SHM_NORESERVE)
                        acctflag = VM_NORESERVE;
                file = hugetlb_file_setup(name, hugesize, acctflag,
                                          &shp->mlock_user, HUGETLB_SHMFS_INODE,
                                          (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
        } else {
                /*
                 * Do not allow no accounting for OVERCOMMIT_NEVER, even
                 * if it's asked for.
                 */
                if ((shmflg & SHM_NORESERVE) &&
                                sysctl_overcommit_memory != OVERCOMMIT_NEVER)
                        acctflag = VM_NORESERVE;
                file = shmem_file_setup(name, size, acctflag);
        }
        error = PTR_ERR(file);
        if (IS_ERR(file))
                goto no_file;

        id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
        if (id < 0) {
                error = id;
                goto no_id;
        }

        shp->shm_cprid = task_tgid_vnr(current);
        shp->shm_lprid = 0;
        shp->shm_atim = shp->shm_dtim = 0;
        shp->shm_ctim = get_seconds();
        shp->shm_segsz = size;
        shp->shm_nattch = 0;
        shp->shm_file = file;
        shp->shm_creator = current;

        /*
         * shmid gets reported as "inode#" in /proc/pid/maps.
         * proc-ps tools use this. Changing this will break them.
         */
        file_inode(file)->i_ino = shp->shm_perm.id;

        ns->shm_tot += numpages;
        error = shp->shm_perm.id;

        ipc_unlock_object(&shp->shm_perm);
        rcu_read_unlock();
        return error;

no_id:
        if (is_file_hugepages(file) && shp->mlock_user)
                user_shm_unlock(size, shp->mlock_user);
        fput(file);
no_file:
        ipc_rcu_putref(shp, shm_rcu_free);
        return error;
}
/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
                                  struct ipc_params *params)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        if (shp->shm_segsz < params->u.size)
                return -EINVAL;

        return 0;
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
        struct ipc_namespace *ns;
        struct ipc_ops shm_ops;
        struct ipc_params shm_params;

        ns = current->nsproxy->ipc_ns;

        shm_ops.getnew = newseg;
        shm_ops.associate = shm_security;
        shm_ops.more_checks = shm_more_checks;

        shm_params.key = key;
        shm_params.flg = shmflg;
        shm_params.u.size = size;

        return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}
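/*
 * Editorial illustration (not part of the kernel source): a minimal
 * userspace sketch of driving this syscall through the usual libc
 * wrapper; the key, size and mode are made-up example values.
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	if (id < 0)
 *		perror("shmget");  // e.g. EINVAL, ENOSPC, ENOMEM as above
 *
 * The request lands in ipcget(), which calls newseg() for a new key and
 * shm_more_checks() when an existing key is found; the size is rounded
 * up to whole pages by newseg().
 */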
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
        {
                struct shmid_ds out;

                memset(&out, 0, sizeof(out));
                ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
                out.shm_segsz = in->shm_segsz;
                out.shm_atime = in->shm_atime;
                out.shm_dtime = in->shm_dtime;
                out.shm_ctime = in->shm_ctime;
                out.shm_cpid = in->shm_cpid;
                out.shm_lpid = in->shm_lpid;
                out.shm_nattch = in->shm_nattch;

                return copy_to_user(buf, &out, sizeof(out));
        }
        default:
                return -EINVAL;
        }
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
        switch (version) {
        case IPC_64:
                if (copy_from_user(out, buf, sizeof(*out)))
                        return -EFAULT;
                return 0;
        case IPC_OLD:
        {
                struct shmid_ds tbuf_old;

                if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
                        return -EFAULT;

                out->shm_perm.uid = tbuf_old.shm_perm.uid;
                out->shm_perm.gid = tbuf_old.shm_perm.gid;
                out->shm_perm.mode = tbuf_old.shm_perm.mode;

                return 0;
        }
        default:
                return -EINVAL;
        }
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
        {
                struct shminfo out;

                if (in->shmmax > INT_MAX)
                        out.shmmax = INT_MAX;
                else
                        out.shmmax = (int)in->shmmax;

                out.shmmin = in->shmmin;
                out.shmmni = in->shmmni;
                out.shmseg = in->shmseg;
                out.shmall = in->shmall;

                return copy_to_user(buf, &out, sizeof(out));
        }
        default:
                return -EINVAL;
        }
}
/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rwsem held as a reader.
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
                             unsigned long *rss_add, unsigned long *swp_add)
{
        struct inode *inode;

        inode = file_inode(shp->shm_file);

        if (is_file_hugepages(shp->shm_file)) {
                struct address_space *mapping = inode->i_mapping;
                struct hstate *h = hstate_file(shp->shm_file);
                *rss_add += pages_per_huge_page(h) * mapping->nrpages;
        } else {
#ifdef CONFIG_SHMEM
                struct shmem_inode_info *info = SHMEM_I(inode);
                spin_lock(&info->lock);
                *rss_add += inode->i_mapping->nrpages;
                *swp_add += info->swapped;
                spin_unlock(&info->lock);
#else
                *rss_add += inode->i_mapping->nrpages;
#endif
        }
}

/*
 * Called with shm_ids.rwsem held as a reader.
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
                         unsigned long *swp)
{
        int next_id;
        int total, in_use;

        *rss = 0;
        *swp = 0;

        in_use = shm_ids(ns).in_use;

        for (total = 0, next_id = 0; total < in_use; next_id++) {
                struct kern_ipc_perm *ipc;
                struct shmid_kernel *shp;

                ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
                if (ipc == NULL)
                        continue;
                shp = container_of(ipc, struct shmid_kernel, shm_perm);

                shm_add_rss_swap(shp, rss, swp);

                total++;
        }
}
/*
 * This function handles some shmctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
                       struct shmid_ds __user *buf, int version)
{
        struct kern_ipc_perm *ipcp;
        struct shmid64_ds shmid64;
        struct shmid_kernel *shp;
        int err;

        if (cmd == IPC_SET) {
                if (copy_shmid_from_user(&shmid64, buf, version))
                        return -EFAULT;
        }

        down_write(&shm_ids(ns).rwsem);
        rcu_read_lock();

        ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
                                      &shmid64.shm_perm, 0);
        if (IS_ERR(ipcp)) {
                err = PTR_ERR(ipcp);
                goto out_unlock1;
        }

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        err = security_shm_shmctl(shp, cmd);
        if (err)
                goto out_unlock1;

        switch (cmd) {
        case IPC_RMID:
                ipc_lock_object(&shp->shm_perm);
                /* do_shm_rmid unlocks the ipc object and rcu */
                do_shm_rmid(ns, ipcp);
                goto out_up;
        case IPC_SET:
                ipc_lock_object(&shp->shm_perm);
                err = ipc_update_perm(&shmid64.shm_perm, ipcp);
                if (err)
                        goto out_unlock0;
                shp->shm_ctim = get_seconds();
                break;
        default:
                err = -EINVAL;
                goto out_unlock1;
        }

out_unlock0:
        ipc_unlock_object(&shp->shm_perm);
out_unlock1:
        rcu_read_unlock();
out_up:
        up_write(&shm_ids(ns).rwsem);
        return err;
}
static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
                         int cmd, int version, void __user *buf)
{
        int err;
        struct shmid_kernel *shp;

        /* preliminary security checks for *_INFO */
        if (cmd == IPC_INFO || cmd == SHM_INFO) {
                err = security_shm_shmctl(NULL, cmd);
                if (err)
                        return err;
        }

        switch (cmd) {
        case IPC_INFO:
        {
                struct shminfo64 shminfo;

                memset(&shminfo, 0, sizeof(shminfo));
                shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
                shminfo.shmmax = ns->shm_ctlmax;
                shminfo.shmall = ns->shm_ctlall;

                shminfo.shmmin = SHMMIN;
                if (copy_shminfo_to_user(buf, &shminfo, version))
                        return -EFAULT;

                down_read(&shm_ids(ns).rwsem);
                err = ipc_get_maxid(&shm_ids(ns));
                up_read(&shm_ids(ns).rwsem);

                if (err < 0)
                        err = 0;
                goto out;
        }
        case SHM_INFO:
        {
                struct shm_info shm_info;

                memset(&shm_info, 0, sizeof(shm_info));
                down_read(&shm_ids(ns).rwsem);
                shm_info.used_ids = shm_ids(ns).in_use;
                shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
                shm_info.shm_tot = ns->shm_tot;
                shm_info.swap_attempts = 0;
                shm_info.swap_successes = 0;
                err = ipc_get_maxid(&shm_ids(ns));
                up_read(&shm_ids(ns).rwsem);
                if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
                        err = -EFAULT;
                        goto out;
                }

                err = err < 0 ? 0 : err;
                goto out;
        }
        case SHM_STAT:
        case IPC_STAT:
        {
                struct shmid64_ds tbuf;
                int result;

                rcu_read_lock();
                if (cmd == SHM_STAT) {
                        shp = shm_obtain_object(ns, shmid);
                        if (IS_ERR(shp)) {
                                err = PTR_ERR(shp);
                                goto out_unlock;
                        }
                        result = shp->shm_perm.id;
                } else {
                        shp = shm_obtain_object_check(ns, shmid);
                        if (IS_ERR(shp)) {
                                err = PTR_ERR(shp);
                                goto out_unlock;
                        }
                        result = 0;
                }

                err = -EACCES;
                if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
                        goto out_unlock;

                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock;

                memset(&tbuf, 0, sizeof(tbuf));
                kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
                tbuf.shm_segsz = shp->shm_segsz;
                tbuf.shm_atime = shp->shm_atim;
                tbuf.shm_dtime = shp->shm_dtim;
                tbuf.shm_ctime = shp->shm_ctim;
                tbuf.shm_cpid = shp->shm_cprid;
                tbuf.shm_lpid = shp->shm_lprid;
                tbuf.shm_nattch = shp->shm_nattch;
                rcu_read_unlock();

                if (copy_shmid_to_user(buf, &tbuf, version))
                        err = -EFAULT;
                else
                        err = result;
                goto out;
        }
        default:
                return -EINVAL;
        }

out_unlock:
        rcu_read_unlock();
out:
        return err;
}
SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
        struct shmid_kernel *shp;
        int err, version;
        struct ipc_namespace *ns;

        if (cmd < 0 || shmid < 0)
                return -EINVAL;

        version = ipc_parse_version(&cmd);
        ns = current->nsproxy->ipc_ns;

        switch (cmd) {
        case IPC_INFO:
        case SHM_INFO:
        case SHM_STAT:
        case IPC_STAT:
                return shmctl_nolock(ns, shmid, cmd, version, buf);
        case IPC_RMID:
        case IPC_SET:
                return shmctl_down(ns, shmid, cmd, buf, version);
        case SHM_LOCK:
        case SHM_UNLOCK:
        {
                struct file *shm_file;

                rcu_read_lock();
                shp = shm_obtain_object_check(ns, shmid);
                if (IS_ERR(shp)) {
                        err = PTR_ERR(shp);
                        goto out_unlock1;
                }

                audit_ipc_obj(&(shp->shm_perm));
                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock1;

                ipc_lock_object(&shp->shm_perm);
                if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
                        kuid_t euid = current_euid();
                        err = -EPERM;
                        if (!uid_eq(euid, shp->shm_perm.uid) &&
                            !uid_eq(euid, shp->shm_perm.cuid))
                                goto out_unlock0;
                        if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
                                goto out_unlock0;
                }

                shm_file = shp->shm_file;
                if (is_file_hugepages(shm_file))
                        goto out_unlock0;

                if (cmd == SHM_LOCK) {
                        struct user_struct *user = current_user();
                        err = shmem_lock(shm_file, 1, user);
                        if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
                                shp->shm_perm.mode |= SHM_LOCKED;
                                shp->mlock_user = user;
                        }
                        goto out_unlock0;
                }

                /* SHM_UNLOCK */
                if (!(shp->shm_perm.mode & SHM_LOCKED))
                        goto out_unlock0;
                shmem_lock(shm_file, 0, shp->mlock_user);
                shp->shm_perm.mode &= ~SHM_LOCKED;
                shp->mlock_user = NULL;
                get_file(shm_file);
                ipc_unlock_object(&shp->shm_perm);
                rcu_read_unlock();
                shmem_unlock_mapping(shm_file->f_mapping);

                fput(shm_file);
                return err;
        }
        default:
                return -EINVAL;
        }

out_unlock0:
        ipc_unlock_object(&shp->shm_perm);
out_unlock1:
        rcu_read_unlock();
        return err;
}
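/*
 * Editorial illustration (not part of the kernel source): a sketch of
 * the IPC_STAT and IPC_RMID paths handled above, assuming the libc
 * shmctl() wrapper; "id" is a segment id obtained from shmget().
 *
 *	struct shmid_ds ds;
 *	if (shmctl(id, IPC_STAT, &ds) == 0)	// shmctl_nolock() path
 *		printf("size=%zu nattch=%lu\n", ds.shm_segsz,
 *		       (unsigned long)ds.shm_nattch);
 *	shmctl(id, IPC_RMID, NULL);	// shmctl_down() path; while still
 *					// attached the segment only gets
 *					// marked SHM_DEST (see do_shm_rmid)
 */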
/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
              unsigned long shmlba)
{
        struct shmid_kernel *shp;
        unsigned long addr;
        unsigned long size;
        struct file *file;
        int err;
        unsigned long flags;
        unsigned long prot;
        int acc_mode;
        struct ipc_namespace *ns;
        struct shm_file_data *sfd;
        struct path path;
        fmode_t f_mode;
        unsigned long populate = 0;

        err = -EINVAL;
        if (shmid < 0)
                goto out;
        else if ((addr = (ulong)shmaddr)) {
                if (addr & (shmlba - 1)) {
                        if (shmflg & SHM_RND)
                                addr &= ~(shmlba - 1); /* round down */
                        else
#ifndef __ARCH_FORCE_SHMLBA
                                if (addr & ~PAGE_MASK)
#endif
                                        goto out;
                }
                flags = MAP_SHARED | MAP_FIXED;
        } else {
                if ((shmflg & SHM_REMAP))
                        goto out;

                flags = MAP_SHARED;
        }

        if (shmflg & SHM_RDONLY) {
                prot = PROT_READ;
                acc_mode = S_IRUGO;
                f_mode = FMODE_READ;
        } else {
                prot = PROT_READ | PROT_WRITE;
                acc_mode = S_IRUGO | S_IWUGO;
                f_mode = FMODE_READ | FMODE_WRITE;
        }
        if (shmflg & SHM_EXEC) {
                prot |= PROT_EXEC;
                acc_mode |= S_IXUGO;
        }

        /*
         * We cannot rely on the fs check since SYSV IPC does have an
         * additional creator id...
         */
        ns = current->nsproxy->ipc_ns;
        rcu_read_lock();
        shp = shm_obtain_object_check(ns, shmid);
        if (IS_ERR(shp)) {
                err = PTR_ERR(shp);
                goto out_unlock;
        }

        err = -EACCES;
        if (ipcperms(ns, &shp->shm_perm, acc_mode))
                goto out_unlock;

        err = security_shm_shmat(shp, shmaddr, shmflg);
        if (err)
                goto out_unlock;

        ipc_lock_object(&shp->shm_perm);
        path = shp->shm_file->f_path;
        path_get(&path);
        shp->shm_nattch++;
        size = i_size_read(path.dentry->d_inode);
        ipc_unlock_object(&shp->shm_perm);
        rcu_read_unlock();

        err = -ENOMEM;
        sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
        if (!sfd) {
                path_put(&path);
                goto out_nattch;
        }

        file = alloc_file(&path, f_mode,
                          is_file_hugepages(shp->shm_file) ?
                                &shm_file_operations_huge :
                                &shm_file_operations);
        err = PTR_ERR(file);
        if (IS_ERR(file)) {
                kfree(sfd);
                path_put(&path);
                goto out_nattch;
        }

        file->private_data = sfd;
        file->f_mapping = shp->shm_file->f_mapping;
        sfd->id = shp->shm_perm.id;
        sfd->ns = get_ipc_ns(ns);
        sfd->file = shp->shm_file;
        sfd->vm_ops = NULL;

        err = security_mmap_file(file, prot, flags);
        if (err)
                goto out_fput;

        down_write(&current->mm->mmap_sem);
        if (addr && !(shmflg & SHM_REMAP)) {
                err = -EINVAL;
                if (find_vma_intersection(current->mm, addr, addr + size))
                        goto invalid;
                /*
                 * If shm segment goes below stack, make sure there is some
                 * space left for the stack to grow (at least 4 pages).
                 */
                if (addr < current->mm->start_stack &&
                    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
                        goto invalid;
        }

        addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
        *raddr = addr;
        err = 0;
        if (IS_ERR_VALUE(addr))
                err = (long)addr;
invalid:
        up_write(&current->mm->mmap_sem);
        if (populate)
                mm_populate(addr, populate);

out_fput:
        fput(file);

out_nattch:
        down_write(&shm_ids(ns).rwsem);
        shp = shm_lock(ns, shmid);
        BUG_ON(IS_ERR(shp));
        shp->shm_nattch--;
        if (shm_may_destroy(ns, shp))
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
        up_write(&shm_ids(ns).rwsem);
        return err;

out_unlock:
        rcu_read_unlock();
out:
        return err;
}
SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
        unsigned long ret;
        long err;

        err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
        if (err)
                return err;
        force_successful_syscall_return();
        return (long)ret;
}
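/*
 * Editorial illustration (not part of the kernel source): a typical
 * userspace attach via the libc wrapper, with "id" assumed to come from
 * shmget(). A NULL address lets the kernel pick one (the plain
 * MAP_SHARED branch of do_shmat() above); a non-NULL address with
 * SHM_RND is rounded down to an SHMLBA boundary.
 *
 *	void *p = shmat(id, NULL, 0);
 *	if (p == (void *)-1)
 *		perror("shmat");
 */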
/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long addr = (unsigned long)shmaddr;
        int retval = -EINVAL;
#ifdef CONFIG_MMU
        loff_t size = 0;
        struct vm_area_struct *next;
#endif

        if (addr & ~PAGE_MASK)
                return retval;

        down_write(&mm->mmap_sem);

        /*
         * This function tries to be smart and unmap shm segments that
         * were modified by partial mlock or munmap calls:
         * - It first determines the size of the shm segment that should be
         *   unmapped: it searches for a vma that is backed by shm and that
         *   started at address shmaddr. It records its size and then unmaps
         *   it.
         * - Then it unmaps all shm vmas that started at shmaddr and that
         *   are within the initially determined size.
         * Errors from do_munmap are ignored: the function only fails if
         * it's called with invalid parameters or if it's called to unmap
         * a part of a vma. Both calls in this function are for full vmas,
         * the parameters are directly copied from the vma itself and always
         * valid - therefore do_munmap cannot fail. (famous last words?)
         */
        /*
         * If it had been mremap()'d, the starting address would not
         * match the usual checks anyway. So assume all vma's are
         * above the starting address given.
         */
        vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
        while (vma) {
                next = vma->vm_next;

                /*
                 * Check if the starting address would match, i.e. it's
                 * a fragment created by mprotect() and/or munmap(), or it
                 * otherwise starts at this address with no hassles.
                 */
                if ((vma->vm_ops == &shm_vm_ops) &&
                        (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

                        size = file_inode(vma->vm_file)->i_size;
                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                        /*
                         * We discovered the size of the shm segment, so
                         * break out of here and fall through to the next
                         * loop that uses the size information to stop
                         * searching for matching vma's.
                         */
                        retval = 0;
                        vma = next;
                        break;
                }
                vma = next;
        }

        /*
         * We need look no further than the maximum address a fragment
         * could possibly have landed at. Also cast things to loff_t to
         * prevent overflows and make comparisons vs. equal-width types.
         */
        size = PAGE_ALIGN(size);
        while (vma && (loff_t)(vma->vm_end - addr) <= size) {
                next = vma->vm_next;

                /* finding a matching vma now does not alter retval */
                if ((vma->vm_ops == &shm_vm_ops) &&
                        (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)
                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                vma = next;
        }

#else /* CONFIG_MMU */
        /* under NOMMU conditions, the exact address to be destroyed must be
         * given */
        if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
                do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                retval = 0;
        }

#endif

        up_write(&mm->mmap_sem);
        return retval;
}
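/*
 * Editorial illustration (not part of the kernel source): the full
 * segment lifecycle as seen from userspace, tying together the four
 * syscalls implemented in this file; values are made-up examples.
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);	// shm_nattch becomes 1
 *	p[0] = 'x';			// faults through shm_fault()
 *	shmctl(id, IPC_RMID, NULL);	// marks the segment SHM_DEST
 *	shmdt(p);			// shm_close() -> shm_destroy()
 */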
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
        struct user_namespace *user_ns = seq_user_ns(s);
        struct shmid_kernel *shp = it;
        unsigned long rss = 0, swp = 0;

        shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

        return seq_printf(s,
                          "%10d %10d %4o " SIZE_SPEC " %5u %5u "
                          "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
                          SIZE_SPEC " " SIZE_SPEC "\n",
                          shp->shm_perm.key,
                          shp->shm_perm.id,
                          shp->shm_perm.mode,
                          shp->shm_segsz,
                          shp->shm_cprid,
                          shp->shm_lprid,
                          shp->shm_nattch,
                          from_kuid_munged(user_ns, shp->shm_perm.uid),
                          from_kgid_munged(user_ns, shp->shm_perm.gid),
                          from_kuid_munged(user_ns, shp->shm_perm.cuid),
                          from_kgid_munged(user_ns, shp->shm_perm.cgid),
                          shp->shm_atim,
                          shp->shm_dtim,
                          shp->shm_ctim,
                          rss * PAGE_SIZE,
                          swp * PAGE_SIZE);
}
#endif