
/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *         Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"

struct shm_file_data {
        int id;
        struct ipc_namespace *ns;
        struct file *file;
        const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)     ((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)                 \
        ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
        ns->shm_ctlmax = SHMMAX;
        ns->shm_ctlall = SHMALL;
        ns->shm_ctlmni = SHMMNI;
        ns->shm_rmid_forced = 0;
        ns->shm_tot = 0;
        ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
 * Only shm_ids.rw_mutex remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        if (shp->shm_nattch) {
                shp->shm_perm.mode |= SHM_DEST;
                /* Do not find it any more */
                shp->shm_perm.key = IPC_PRIVATE;
                shm_unlock(shp);
        } else
                shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
        free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
        idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

void __init shm_init(void)
{
        shm_init_ns(&init_ipc_ns);
        ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
                                "       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
                                "       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
                                IPC_SHM_IDS, sysvipc_shm_proc_show);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rw_mutex
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return (struct shmid_kernel *)ipcp;

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
        rcu_read_lock();
        spin_lock(&ipcp->shm_perm.lock);
}

static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
                                                int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return (struct shmid_kernel *)ipcp;

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
        ipc_rmid(&shm_ids(ns), &s->shm_perm);
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;

        shp = shm_lock(sfd->ns, sfd->id);
        BUG_ON(IS_ERR(shp));
        shp->shm_atim = get_seconds();
        shp->shm_lprid = task_tgid_vnr(current);
        shp->shm_nattch++;
        shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
        ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
        shm_rmid(ns, shp);
        shm_unlock(shp);
        if (!is_file_hugepages(shp->shm_file))
                shmem_lock(shp->shm_file, 0, shp->mlock_user);
        else if (shp->mlock_user)
                user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
                                shp->mlock_user);
        fput(shp->shm_file);
        security_shm_free(shp);
        ipc_rcu_putref(shp);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
        return (shp->shm_nattch == 0) &&
               (ns->shm_rmid_forced ||
                (shp->shm_perm.mode & SHM_DEST));
}

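/*
 * A minimal userspace sketch of flipping condition 2 above through procfs.
 * It assumes procfs is mounted at /proc and the caller has sufficient
 * privileges; it is an illustration only, not part of this file:
 *
 *      #include <stdio.h>
 *
 *      int main(void)
 *      {
 *              FILE *f = fopen("/proc/sys/kernel/shm_rmid_forced", "w");
 *              if (!f)
 *                      return 1;
 *              fputs("1", f);
 *              return fclose(f) ? 1 : 0;
 *      }
 */
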
/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;
        struct ipc_namespace *ns = sfd->ns;

        down_write(&shm_ids(ns).rw_mutex);
        /* remove from the list of attaches of the shm segment */
        shp = shm_lock(ns, sfd->id);
        BUG_ON(IS_ERR(shp));
        shp->shm_lprid = task_tgid_vnr(current);
        shp->shm_dtim = get_seconds();
        shp->shm_nattch--;
        if (shm_may_destroy(ns, shp))
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
        up_write(&shm_ids(ns).rw_mutex);
}

/* Called with ns->shm_ids(ns).rw_mutex locked */
static int shm_try_destroy_current(int id, void *p, void *data)
{
        struct ipc_namespace *ns = data;
        struct kern_ipc_perm *ipcp = p;
        struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        if (shp->shm_creator != current)
                return 0;

        /*
         * Mark it as orphaned to destroy the segment when
         * kernel.shm_rmid_forced is changed.
         * It is a no-op if the following shm_may_destroy() returns true.
         */
        shp->shm_creator = NULL;

        /*
         * Don't even try to destroy it. If shm_rmid_forced=0 and IPC_RMID
         * is not set, it shouldn't be deleted here.
         */
        if (!ns->shm_rmid_forced)
                return 0;

        if (shm_may_destroy(ns, shp)) {
                shm_lock_by_ptr(shp);
                shm_destroy(ns, shp);
        }
        return 0;
}

/* Called with ns->shm_ids(ns).rw_mutex locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
        struct ipc_namespace *ns = data;
        struct kern_ipc_perm *ipcp = p;
        struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        /*
         * We want to destroy segments without users and whose originating
         * process has already exited.
         *
         * As shp->* are changed under rw_mutex, it's safe to skip shp locking.
         */
        if (shp->shm_creator != NULL)
                return 0;

        if (shm_may_destroy(ns, shp)) {
                shm_lock_by_ptr(shp);
                shm_destroy(ns, shp);
        }
        return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
        down_write(&shm_ids(ns).rw_mutex);
        if (shm_ids(ns).in_use)
                idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
        up_write(&shm_ids(ns).rw_mutex);
}

void exit_shm(struct task_struct *task)
{
        struct ipc_namespace *ns = task->nsproxy->ipc_ns;

        /* Destroy all segments this task created but never mapped */
        down_write(&shm_ids(ns).rw_mutex);
        if (shm_ids(ns).in_use)
                idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
        up_write(&shm_ids(ns).rw_mutex);
}

static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);

        return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        int err = 0;

        if (sfd->vm_ops->set_policy)
                err = sfd->vm_ops->set_policy(vma, new);
        return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
                                        unsigned long addr)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct mempolicy *pol = NULL;

        if (sfd->vm_ops->get_policy)
                pol = sfd->vm_ops->get_policy(vma, addr);
        else if (vma->vm_policy)
                pol = vma->vm_policy;

        return pol;
}
#endif

static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct shm_file_data *sfd = shm_file_data(file);
        int ret;

        ret = sfd->file->f_op->mmap(sfd->file, vma);
        if (ret != 0)
                return ret;
        sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
        BUG_ON(!sfd->vm_ops->fault);
#endif
        vma->vm_ops = &shm_vm_ops;
        shm_open(vma);

        return ret;
}

static int shm_release(struct inode *ino, struct file *file)
{
        struct shm_file_data *sfd = shm_file_data(file);

        put_ipc_ns(sfd->ns);
        shm_file_data(file) = NULL;
        kfree(sfd);
        return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct shm_file_data *sfd = shm_file_data(file);

        if (!sfd->file->f_op->fsync)
                return -EINVAL;
        return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static unsigned long shm_get_unmapped_area(struct file *file,
        unsigned long addr, unsigned long len, unsigned long pgoff,
        unsigned long flags)
{
        struct shm_file_data *sfd = shm_file_data(file);

        return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
                                                pgoff, flags);
}

static const struct file_operations shm_file_operations = {
        .mmap           = shm_mmap,
        .fsync          = shm_fsync,
        .release        = shm_release,
#ifndef CONFIG_MMU
        .get_unmapped_area      = shm_get_unmapped_area,
#endif
        .llseek         = noop_llseek,
};

static const struct file_operations shm_file_operations_huge = {
        .mmap           = shm_mmap,
        .fsync          = shm_fsync,
        .release        = shm_release,
        .get_unmapped_area      = shm_get_unmapped_area,
        .llseek         = noop_llseek,
};

int is_file_shm_hugepages(struct file *file)
{
        return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
        .open   = shm_open,     /* callback for a new vm-area open */
        .close  = shm_close,    /* callback for when the vm-area is released */
        .fault  = shm_fault,
#if defined(CONFIG_NUMA)
        .set_policy = shm_set_policy,
        .get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rw_mutex held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
        key_t key = params->key;
        int shmflg = params->flg;
        size_t size = params->u.size;
        int error;
        struct shmid_kernel *shp;
        int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        struct file *file;
        char name[13];
        int id;
        vm_flags_t acctflag = 0;

        if (size < SHMMIN || size > ns->shm_ctlmax)
                return -EINVAL;

        if (ns->shm_tot + numpages > ns->shm_ctlall)
                return -ENOSPC;

        shp = ipc_rcu_alloc(sizeof(*shp));
        if (!shp)
                return -ENOMEM;

        shp->shm_perm.key = key;
        shp->shm_perm.mode = (shmflg & S_IRWXUGO);
        shp->mlock_user = NULL;

        shp->shm_perm.security = NULL;
        error = security_shm_alloc(shp);
        if (error) {
                ipc_rcu_putref(shp);
                return error;
        }

        sprintf(name, "SYSV%08x", key);
        if (shmflg & SHM_HUGETLB) {
                /* hugetlb_file_setup applies strict accounting */
                if (shmflg & SHM_NORESERVE)
                        acctflag = VM_NORESERVE;
                file = hugetlb_file_setup(name, size, acctflag,
                                        &shp->mlock_user, HUGETLB_SHMFS_INODE);
        } else {
                /*
                 * Do not allow accounting to be skipped under
                 * OVERCOMMIT_NEVER, even if SHM_NORESERVE asks for it.
                 */
                if ((shmflg & SHM_NORESERVE) &&
                                sysctl_overcommit_memory != OVERCOMMIT_NEVER)
                        acctflag = VM_NORESERVE;
                file = shmem_file_setup(name, size, acctflag);
        }
        error = PTR_ERR(file);
        if (IS_ERR(file))
                goto no_file;

        id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
        if (id < 0) {
                error = id;
                goto no_id;
        }

        shp->shm_cprid = task_tgid_vnr(current);
        shp->shm_lprid = 0;
        shp->shm_atim = shp->shm_dtim = 0;
        shp->shm_ctim = get_seconds();
        shp->shm_segsz = size;
        shp->shm_nattch = 0;
        shp->shm_file = file;
        shp->shm_creator = current;
        /*
         * shmid gets reported as "inode#" in /proc/pid/maps.
         * proc-ps tools use this. Changing this will break them.
         */
        file->f_dentry->d_inode->i_ino = shp->shm_perm.id;

        ns->shm_tot += numpages;
        error = shp->shm_perm.id;
        shm_unlock(shp);
        return error;

no_id:
        if (is_file_hugepages(file) && shp->mlock_user)
                user_shm_unlock(size, shp->mlock_user);
        fput(file);
no_file:
        security_shm_free(shp);
        ipc_rcu_putref(shp);
        return error;
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
                                struct ipc_params *params)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        if (shp->shm_segsz < params->u.size)
                return -EINVAL;

        return 0;
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
        struct ipc_namespace *ns;
        struct ipc_ops shm_ops;
        struct ipc_params shm_params;

        ns = current->nsproxy->ipc_ns;

        shm_ops.getnew = newseg;
        shm_ops.associate = shm_security;
        shm_ops.more_checks = shm_more_checks;

        shm_params.key = key;
        shm_params.flg = shmflg;
        shm_params.u.size = size;

        return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}

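/*
 * A minimal userspace sketch of the shmget(2) entry point above; the key,
 * the 4096-byte size and the 0600 mode are arbitrary illustrative values
 * (this block is not part of the kernel file):
 *
 *      #include <sys/ipc.h>
 *      #include <sys/shm.h>
 *      #include <stdio.h>
 *
 *      int main(void)
 *      {
 *              int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *              if (id < 0) {
 *                      perror("shmget");
 *                      return 1;
 *              }
 *              printf("segment id %d\n", id);
 *              return 0;
 *      }
 */
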
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
            {
                struct shmid_ds out;

                memset(&out, 0, sizeof(out));
                ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
                out.shm_segsz   = in->shm_segsz;
                out.shm_atime   = in->shm_atime;
                out.shm_dtime   = in->shm_dtime;
                out.shm_ctime   = in->shm_ctime;
                out.shm_cpid    = in->shm_cpid;
                out.shm_lpid    = in->shm_lpid;
                out.shm_nattch  = in->shm_nattch;

                return copy_to_user(buf, &out, sizeof(out));
            }
        default:
                return -EINVAL;
        }
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
        switch (version) {
        case IPC_64:
                if (copy_from_user(out, buf, sizeof(*out)))
                        return -EFAULT;
                return 0;
        case IPC_OLD:
            {
                struct shmid_ds tbuf_old;

                if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
                        return -EFAULT;

                out->shm_perm.uid       = tbuf_old.shm_perm.uid;
                out->shm_perm.gid       = tbuf_old.shm_perm.gid;
                out->shm_perm.mode      = tbuf_old.shm_perm.mode;

                return 0;
            }
        default:
                return -EINVAL;
        }
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
            {
                struct shminfo out;

                if (in->shmmax > INT_MAX)
                        out.shmmax = INT_MAX;
                else
                        out.shmmax = (int)in->shmmax;

                out.shmmin      = in->shmmin;
                out.shmmni      = in->shmmni;
                out.shmseg      = in->shmseg;
                out.shmall      = in->shmall;

                return copy_to_user(buf, &out, sizeof(out));
            }
        default:
                return -EINVAL;
        }
}

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rw_mutex held as a reader.
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
        unsigned long *rss_add, unsigned long *swp_add)
{
        struct inode *inode;

        inode = shp->shm_file->f_path.dentry->d_inode;

        if (is_file_hugepages(shp->shm_file)) {
                struct address_space *mapping = inode->i_mapping;
                struct hstate *h = hstate_file(shp->shm_file);
                *rss_add += pages_per_huge_page(h) * mapping->nrpages;
        } else {
#ifdef CONFIG_SHMEM
                struct shmem_inode_info *info = SHMEM_I(inode);
                spin_lock(&info->lock);
                *rss_add += inode->i_mapping->nrpages;
                *swp_add += info->swapped;
                spin_unlock(&info->lock);
#else
                *rss_add += inode->i_mapping->nrpages;
#endif
        }
}

/*
 * Called with shm_ids.rw_mutex held as a reader.
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
                unsigned long *swp)
{
        int next_id;
        int total, in_use;

        *rss = 0;
        *swp = 0;

        in_use = shm_ids(ns).in_use;

        for (total = 0, next_id = 0; total < in_use; next_id++) {
                struct kern_ipc_perm *ipc;
                struct shmid_kernel *shp;

                ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
                if (ipc == NULL)
                        continue;
                shp = container_of(ipc, struct shmid_kernel, shm_perm);

                shm_add_rss_swap(shp, rss, swp);

                total++;
        }
}

/*
 * This function handles some shmctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
                       struct shmid_ds __user *buf, int version)
{
        struct kern_ipc_perm *ipcp;
        struct shmid64_ds shmid64;
        struct shmid_kernel *shp;
        int err;

        if (cmd == IPC_SET) {
                if (copy_shmid_from_user(&shmid64, buf, version))
                        return -EFAULT;
        }

        ipcp = ipcctl_pre_down(ns, &shm_ids(ns), shmid, cmd,
                               &shmid64.shm_perm, 0);
        if (IS_ERR(ipcp))
                return PTR_ERR(ipcp);

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        err = security_shm_shmctl(shp, cmd);
        if (err)
                goto out_unlock;
        switch (cmd) {
        case IPC_RMID:
                do_shm_rmid(ns, ipcp);
                goto out_up;
        case IPC_SET:
                ipc_update_perm(&shmid64.shm_perm, ipcp);
                shp->shm_ctim = get_seconds();
                break;
        default:
                err = -EINVAL;
        }
out_unlock:
        shm_unlock(shp);
out_up:
        up_write(&shm_ids(ns).rw_mutex);
        return err;
}

SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
        struct shmid_kernel *shp;
        int err, version;
        struct ipc_namespace *ns;

        if (cmd < 0 || shmid < 0) {
                err = -EINVAL;
                goto out;
        }

        version = ipc_parse_version(&cmd);
        ns = current->nsproxy->ipc_ns;

        switch (cmd) { /* replace with proc interface? */
        case IPC_INFO:
        {
                struct shminfo64 shminfo;

                err = security_shm_shmctl(NULL, cmd);
                if (err)
                        return err;

                memset(&shminfo, 0, sizeof(shminfo));
                shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
                shminfo.shmmax = ns->shm_ctlmax;
                shminfo.shmall = ns->shm_ctlall;

                shminfo.shmmin = SHMMIN;
                if (copy_shminfo_to_user(buf, &shminfo, version))
                        return -EFAULT;

                down_read(&shm_ids(ns).rw_mutex);
                err = ipc_get_maxid(&shm_ids(ns));
                up_read(&shm_ids(ns).rw_mutex);

                if (err < 0)
                        err = 0;
                goto out;
        }
        case SHM_INFO:
        {
                struct shm_info shm_info;

                err = security_shm_shmctl(NULL, cmd);
                if (err)
                        return err;

                memset(&shm_info, 0, sizeof(shm_info));
                down_read(&shm_ids(ns).rw_mutex);
                shm_info.used_ids = shm_ids(ns).in_use;
                shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
                shm_info.shm_tot = ns->shm_tot;
                shm_info.swap_attempts = 0;
                shm_info.swap_successes = 0;
                err = ipc_get_maxid(&shm_ids(ns));
                up_read(&shm_ids(ns).rw_mutex);
                if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
                        err = -EFAULT;
                        goto out;
                }

                err = err < 0 ? 0 : err;
                goto out;
        }
        case SHM_STAT:
        case IPC_STAT:
        {
                struct shmid64_ds tbuf;
                int result;

                if (cmd == SHM_STAT) {
                        shp = shm_lock(ns, shmid);
                        if (IS_ERR(shp)) {
                                err = PTR_ERR(shp);
                                goto out;
                        }
                        result = shp->shm_perm.id;
                } else {
                        shp = shm_lock_check(ns, shmid);
                        if (IS_ERR(shp)) {
                                err = PTR_ERR(shp);
                                goto out;
                        }
                        result = 0;
                }
                err = -EACCES;
                if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
                        goto out_unlock;
                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock;
                memset(&tbuf, 0, sizeof(tbuf));
                kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
                tbuf.shm_segsz  = shp->shm_segsz;
                tbuf.shm_atime  = shp->shm_atim;
                tbuf.shm_dtime  = shp->shm_dtim;
                tbuf.shm_ctime  = shp->shm_ctim;
                tbuf.shm_cpid   = shp->shm_cprid;
                tbuf.shm_lpid   = shp->shm_lprid;
                tbuf.shm_nattch = shp->shm_nattch;
                shm_unlock(shp);
                if (copy_shmid_to_user(buf, &tbuf, version))
                        err = -EFAULT;
                else
                        err = result;
                goto out;
        }
        case SHM_LOCK:
        case SHM_UNLOCK:
        {
                lru_add_drain_all();  /* drain pagevecs to lru lists */

                shp = shm_lock_check(ns, shmid);
                if (IS_ERR(shp)) {
                        err = PTR_ERR(shp);
                        goto out;
                }

                audit_ipc_obj(&(shp->shm_perm));

                if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
                        uid_t euid = current_euid();
                        err = -EPERM;
                        if (euid != shp->shm_perm.uid &&
                            euid != shp->shm_perm.cuid)
                                goto out_unlock;
                        if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
                                goto out_unlock;
                }

                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock;

                if (cmd == SHM_LOCK) {
                        struct user_struct *user = current_user();
                        if (!is_file_hugepages(shp->shm_file)) {
                                err = shmem_lock(shp->shm_file, 1, user);
                                if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
                                        shp->shm_perm.mode |= SHM_LOCKED;
                                        shp->mlock_user = user;
                                }
                        }
                } else if (!is_file_hugepages(shp->shm_file)) {
                        shmem_lock(shp->shm_file, 0, shp->mlock_user);
                        shp->shm_perm.mode &= ~SHM_LOCKED;
                        shp->mlock_user = NULL;
                }
                shm_unlock(shp);
                goto out;
        }
        case IPC_RMID:
        case IPC_SET:
                err = shmctl_down(ns, shmid, cmd, buf, version);
                return err;
        default:
                return -EINVAL;
        }

out_unlock:
        shm_unlock(shp);
out:
        return err;
}

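/*
 * A minimal userspace sketch of two shmctl(2) commands handled above,
 * IPC_STAT and IPC_RMID. "id" is assumed to come from a prior shmget;
 * this block is not part of the kernel file:
 *
 *      #include <sys/ipc.h>
 *      #include <sys/shm.h>
 *      #include <stdio.h>
 *
 *      static void stat_and_remove(int id)
 *      {
 *              struct shmid_ds ds;
 *
 *              if (shmctl(id, IPC_STAT, &ds) == 0)
 *                      printf("size %zu, nattch %lu\n",
 *                             ds.shm_segsz, (unsigned long)ds.shm_nattch);
 *              shmctl(id, IPC_RMID, NULL);
 *      }
 */
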
/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" pointer points to kernel space, and there has to be a wrapper
 * around this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
        struct shmid_kernel *shp;
        unsigned long addr;
        unsigned long size;
        struct file *file;
        int err;
        unsigned long flags;
        unsigned long prot;
        int acc_mode;
        unsigned long user_addr;
        struct ipc_namespace *ns;
        struct shm_file_data *sfd;
        struct path path;
        fmode_t f_mode;

        err = -EINVAL;
        if (shmid < 0)
                goto out;
        else if ((addr = (ulong)shmaddr)) {
                if (addr & (SHMLBA - 1)) {
                        if (shmflg & SHM_RND)
                                addr &= ~(SHMLBA - 1);     /* round down */
                        else
#ifndef __ARCH_FORCE_SHMLBA
                                if (addr & ~PAGE_MASK)
#endif
                                        goto out;
                }
                flags = MAP_SHARED | MAP_FIXED;
        } else {
                if ((shmflg & SHM_REMAP))
                        goto out;

                flags = MAP_SHARED;
        }

        if (shmflg & SHM_RDONLY) {
                prot = PROT_READ;
                acc_mode = S_IRUGO;
                f_mode = FMODE_READ;
        } else {
                prot = PROT_READ | PROT_WRITE;
                acc_mode = S_IRUGO | S_IWUGO;
                f_mode = FMODE_READ | FMODE_WRITE;
        }
        if (shmflg & SHM_EXEC) {
                prot |= PROT_EXEC;
                acc_mode |= S_IXUGO;
        }

        /*
         * We cannot rely on the fs check since SYSV IPC does have an
         * additional creator id...
         */
        ns = current->nsproxy->ipc_ns;
        shp = shm_lock_check(ns, shmid);
        if (IS_ERR(shp)) {
                err = PTR_ERR(shp);
                goto out;
        }

        err = -EACCES;
        if (ipcperms(ns, &shp->shm_perm, acc_mode))
                goto out_unlock;

        err = security_shm_shmat(shp, shmaddr, shmflg);
        if (err)
                goto out_unlock;

        path = shp->shm_file->f_path;
        path_get(&path);
        shp->shm_nattch++;
        size = i_size_read(path.dentry->d_inode);
        shm_unlock(shp);

        err = -ENOMEM;
        sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
        if (!sfd)
                goto out_put_dentry;

        file = alloc_file(&path, f_mode,
                          is_file_hugepages(shp->shm_file) ?
                                &shm_file_operations_huge :
                                &shm_file_operations);
        if (!file)
                goto out_free;

        file->private_data = sfd;
        file->f_mapping = shp->shm_file->f_mapping;
        sfd->id = shp->shm_perm.id;
        sfd->ns = get_ipc_ns(ns);
        sfd->file = shp->shm_file;
        sfd->vm_ops = NULL;

        down_write(&current->mm->mmap_sem);
        if (addr && !(shmflg & SHM_REMAP)) {
                err = -EINVAL;
                if (find_vma_intersection(current->mm, addr, addr + size))
                        goto invalid;
                /*
                 * If shm segment goes below stack, make sure there is some
                 * space left for the stack to grow (at least 4 pages).
                 */
                if (addr < current->mm->start_stack &&
                    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
                        goto invalid;
        }

        user_addr = do_mmap(file, addr, size, prot, flags, 0);
        *raddr = user_addr;
        err = 0;
        if (IS_ERR_VALUE(user_addr))
                err = (long)user_addr;
invalid:
        up_write(&current->mm->mmap_sem);

        fput(file);

out_nattch:
        down_write(&shm_ids(ns).rw_mutex);
        shp = shm_lock(ns, shmid);
        BUG_ON(IS_ERR(shp));
        shp->shm_nattch--;
        if (shm_may_destroy(ns, shp))
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
        up_write(&shm_ids(ns).rw_mutex);

out:
        return err;

out_unlock:
        shm_unlock(shp);
        goto out;

out_free:
        kfree(sfd);
out_put_dentry:
        path_put(&path);
        goto out_nattch;
}

SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
        unsigned long ret;
        long err;

        err = do_shmat(shmid, shmaddr, shmflg, &ret);
        if (err)
                return err;
        force_successful_syscall_return();
        return (long)ret;
}

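/*
 * A minimal userspace sketch of attaching through the shmat(2) wrapper
 * above. "id" is assumed to come from a prior shmget and the segment is
 * assumed to be at least 4096 bytes; this block is not part of the
 * kernel file:
 *
 *      #include <sys/shm.h>
 *      #include <string.h>
 *
 *      static char *attach_rw(int id)
 *      {
 *              void *p = shmat(id, NULL, 0);
 *              if (p == (void *)-1)
 *                      return NULL;
 *              memset(p, 0, 4096);
 *              return p;
 *      }
 */
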
/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long addr = (unsigned long)shmaddr;
        int retval = -EINVAL;
#ifdef CONFIG_MMU
        loff_t size = 0;
        struct vm_area_struct *next;
#endif

        if (addr & ~PAGE_MASK)
                return retval;

        down_write(&mm->mmap_sem);

        /*
         * This function tries to be smart and unmap shm segments that
         * were modified by partial mlock or munmap calls:
         * - It first determines the size of the shm segment that should be
         *   unmapped: It searches for a vma that is backed by shm and that
         *   started at address shmaddr. It records its size and then unmaps
         *   it.
         * - Then it unmaps all shm vmas that started at shmaddr and that
         *   are within the initially determined size.
         * Errors from do_munmap are ignored: the function only fails if
         * it's called with invalid parameters or if it's called to unmap
         * a part of a vma. Both calls in this function are for full vmas,
         * the parameters are directly copied from the vma itself and always
         * valid - therefore do_munmap cannot fail. (famous last words?)
         */
        /*
         * If it had been mremap()'d, the starting address would not
         * match the usual checks anyway. So assume all vma's are
         * above the starting address given.
         */
        vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
        while (vma) {
                next = vma->vm_next;

                /*
                 * Check if the starting address would match, i.e. it's
                 * a fragment created by mprotect() and/or munmap(), or it
                 * otherwise starts at this address with no hassles.
                 */
                if ((vma->vm_ops == &shm_vm_ops) &&
                        (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

                        size = vma->vm_file->f_path.dentry->d_inode->i_size;
                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                        /*
                         * We discovered the size of the shm segment, so
                         * break out of here and fall through to the next
                         * loop that uses the size information to stop
                         * searching for matching vma's.
                         */
                        retval = 0;
                        vma = next;
                        break;
                }
                vma = next;
        }

        /*
         * We need look no further than the maximum address a fragment
         * could possibly have landed at. Also cast things to loff_t to
         * prevent overflows and make comparisons vs. equal-width types.
         */
        size = PAGE_ALIGN(size);
        while (vma && (loff_t)(vma->vm_end - addr) <= size) {
                next = vma->vm_next;

                /* finding a matching vma now does not alter retval */
                if ((vma->vm_ops == &shm_vm_ops) &&
                        (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                vma = next;
        }

#else /* CONFIG_MMU */
        /*
         * Under NOMMU conditions, the exact address to be destroyed must
         * be given.
         */
        retval = -EINVAL;
        if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
                do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                retval = 0;
        }

#endif

        up_write(&mm->mmap_sem);
        return retval;
}

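/*
 * A minimal userspace sketch of detaching with shmdt(2). "p" is assumed
 * to be an address returned by a prior shmat; this block is not part of
 * the kernel file:
 *
 *      #include <sys/shm.h>
 *      #include <stdio.h>
 *
 *      static void detach(void *p)
 *      {
 *              if (shmdt(p) != 0)
 *                      perror("shmdt");
 *      }
 */
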
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
        struct shmid_kernel *shp = it;
        unsigned long rss = 0, swp = 0;

        shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

        return seq_printf(s,
                          "%10d %10d %4o " SIZE_SPEC " %5u %5u "
                          "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
                          SIZE_SPEC " " SIZE_SPEC "\n",
                          shp->shm_perm.key,
                          shp->shm_perm.id,
                          shp->shm_perm.mode,
                          shp->shm_segsz,
                          shp->shm_cprid,
                          shp->shm_lprid,
                          shp->shm_nattch,
                          shp->shm_perm.uid,
                          shp->shm_perm.gid,
                          shp->shm_perm.cuid,
                          shp->shm_perm.cgid,
                          shp->shm_atim,
                          shp->shm_dtim,
                          shp->shm_ctim,
                          rss * PAGE_SIZE,
                          swp * PAGE_SIZE);
}
#endif
  1055. #endif