/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"

struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
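
/*
 * Note that shm_file_data() expands to an lvalue: it reinterprets the
 * file's private_data slot as a struct shm_file_data pointer, so callers
 * can both read it and assign through it (shm_release() stores NULL here).
 */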

static const struct file_operations shm_file_operations;
static struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}
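
/*
 * These per-namespace limits back the shmmax/shmall/shmmni sysctls
 * mentioned in the header above (kernel.shmmax, kernel.shmall,
 * kernel.shmmni); shm_tot tracks the total number of shm pages
 * currently allocated in the namespace.
 */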

/*
 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
 * Only shm_ids.rw_mutex remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
}
#endif

void __init shm_init(void)
{
	shm_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/shm",
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime\n",
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rw_mutex
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else
		user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
				shp->mlock_user);
	fput(shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rw_mutex);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);
}
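
/*
 * shm_fault() and the NUMA policy hooks below forward to the
 * vm_operations of the underlying shmem or hugetlbfs file, which
 * shm_mmap() saved in sfd->vm_ops when the segment was mapped.
 */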
static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;

	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif
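
/*
 * shm_mmap() lets the backing file (shmem or hugetlbfs) establish the
 * mapping, then interposes shm_vm_ops so that every vma open/close -
 * including those triggered by fork() - updates the segment's attach
 * accounting via shm_open()/shm_close().
 */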
static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	BUG_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);

	return ret;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	int (*fsync)(struct file *, struct dentry *, int datasync);
	struct shm_file_data *sfd = shm_file_data(file);
	int ret = -EINVAL;

	fsync = sfd->file->f_op->fsync;
	if (fsync)
		ret = fsync(sfd->file, sfd->file->f_path.dentry, datasync);
	return ret;
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);

	return get_unmapped_area(sfd->file, addr, len, pgoff, flags);
}
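
/*
 * For callers holding only a struct file: unwrap the shm wrapper file
 * and report whether the real backing file is hugepage-backed.
 */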
int is_file_shm_hugepages(struct file *file)
{
	int ret = 0;

	if (file->f_op == &shm_file_operations) {
		struct shm_file_data *sfd;
		sfd = shm_file_data(file);
		ret = is_file_hugepages(sfd->file);
	}
	return ret;
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
};

static struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rw_mutex held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;
	int acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, size, acctflag);
		shp->mlock_user = current_user();
	} else {
		/*
		 * Do not skip reservation accounting under
		 * OVERCOMMIT_NEVER, even if SHM_NORESERVE asks for it.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (id < 0) {
		error = id;
		goto no_id;
	}

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file->f_dentry->d_inode->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;
	shm_unlock(shp);
	return error;

no_id:
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops shm_ops;
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_ops.getnew = newseg;
	shm_ops.associate = shm_security;
	shm_ops.more_checks = shm_more_checks;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}
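
/*
 * The copy_* helpers below bridge the two userspace ABIs: IPC_64
 * callers exchange the native shmid64_ds/shminfo64 layouts directly,
 * while IPC_OLD callers get a conversion to and from the legacy
 * shmid_ds/shminfo structures.
 */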
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

/*
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct shmid_kernel *shp;
		struct inode *inode;

		shp = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (shp == NULL)
			continue;

		inode = shp->shm_file->f_path.dentry->d_inode;

		if (is_file_hugepages(shp->shm_file)) {
			struct address_space *mapping = inode->i_mapping;
			struct hstate *h = hstate_file(shp->shm_file);
			*rss += pages_per_huge_page(h) * mapping->nrpages;
		} else {
#ifdef CONFIG_SHMEM
			struct shmem_inode_info *info = SHMEM_I(inode);
			spin_lock(&info->lock);
			*rss += inode->i_mapping->nrpages;
			*swp += info->swapped;
			spin_unlock(&info->lock);
#else
			*rss += inode->i_mapping->nrpages;
#endif
		}

		total++;
	}
}

/*
 * This function handles some shmctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct shmid64_ds shmid64;
	struct shmid_kernel *shp;
	int err;

	if (cmd == IPC_SET) {
		if (copy_shmid_from_user(&shmid64, buf, version))
			return -EFAULT;
	}

	ipcp = ipcctl_pre_down(&shm_ids(ns), shmid, cmd, &shmid64.shm_perm, 0);
	if (IS_ERR(ipcp))
		return PTR_ERR(ipcp);

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock;
	switch (cmd) {
	case IPC_RMID:
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_update_perm(&shmid64.shm_perm, ipcp);
		shp->shm_ctim = get_seconds();
		break;
	default:
		err = -EINVAL;
	}
out_unlock:
	shm_unlock(shp);
out_up:
	up_write(&shm_ids(ns).rw_mutex);
	return err;
}

SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rw_mutex);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);

		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rw_mutex);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		if (cmd == SHM_STAT) {
			shp = shm_lock(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_lock_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = 0;
		}
		err = -EACCES;
		if (ipcperms(&shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;
		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		shm_unlock(shp);
		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		struct file *uninitialized_var(shm_file);

		lru_add_drain_all();  /* drain pagevecs to lru lists */

		shp = shm_lock_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out;
		}

		audit_ipc_obj(&(shp->shm_perm));

		if (!capable(CAP_IPC_LOCK)) {
			uid_t euid = current_euid();
			err = -EPERM;
			if (euid != shp->shm_perm.uid &&
			    euid != shp->shm_perm.cuid)
				goto out_unlock;
			if (cmd == SHM_LOCK &&
			    !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current_user();
			if (!is_file_hugepages(shp->shm_file)) {
				err = shmem_lock(shp->shm_file, 1, user);
				if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
					shp->shm_perm.mode |= SHM_LOCKED;
					shp->mlock_user = user;
				}
			}
		} else if (!is_file_hugepages(shp->shm_file)) {
			shmem_lock(shp->shm_file, 0, shp->mlock_user);
			shp->shm_perm.mode &= ~SHM_LOCKED;
			shp->mlock_user = NULL;
		}
		shm_unlock(shp);
		goto out;
	}
	case IPC_RMID:
	case IPC_SET:
		err = shmctl_down(ns, shmid, cmd, buf, version);
		return err;
	default:
		return -EINVAL;
	}

out_unlock:
	shm_unlock(shp);
out:
	return err;
}
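
/*
 * Illustration only (not part of this file): a minimal userspace
 * sketch of the IPC_STAT path handled above.
 *
 *	struct shmid_ds ds;
 *
 *	if (shmctl(id, IPC_STAT, &ds) == 0)
 *		printf("size=%zu nattch=%lu\n", (size_t)ds.shm_segsz,
 *		       (unsigned long)ds.shm_nattch);
 */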

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file *file;
	int err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	unsigned long user_addr;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA - 1)) {
			if (shmflg & SHM_RND)
				addr &= ~(SHMLBA - 1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	shp = shm_lock_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out;
	}

	err = -EACCES;
	if (ipcperms(&shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	path.dentry = dget(shp->shm_file->f_path.dentry);
	path.mnt    = shp->shm_file->f_path.mnt;
	shp->shm_nattch++;
	size = i_size_read(path.dentry->d_inode);
	shm_unlock(shp);

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd)
		goto out_put_dentry;

	file = alloc_file(path.mnt, path.dentry, f_mode, &shm_file_operations);
	if (!file)
		goto out_free;

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = do_mmap(file, addr, size, prot, flags, 0);
	*raddr = user_addr;
	err = 0;
	if (IS_ERR_VALUE(user_addr))
		err = (long)user_addr;
invalid:
	up_write(&current->mm->mmap_sem);

	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rw_mutex);
	shp = shm_lock(ns, shmid);
	BUG_ON(IS_ERR(shp));
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);

out:
	return err;

out_unlock:
	shm_unlock(shp);
	goto out;

out_free:
	kfree(sfd);
out_put_dentry:
	dput(path.dentry);
	goto out_nattch;
}
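
/*
 * A successful attach address may fall in the range that the syscall
 * exit path on some architectures would interpret as an errno, hence
 * the force_successful_syscall_return() in the wrapper below.
 */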
SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *next;
	unsigned long addr = (unsigned long)shmaddr;
	loff_t size = 0;
	int retval = -EINVAL;

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or
		 * otherwise it starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			size = vma->vm_file->f_path.dentry->d_inode->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

#else /* CONFIG_MMU */
	/*
	 * Under NOMMU conditions, the exact address to be destroyed must
	 * be given.
	 */
	retval = -EINVAL;
	if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		retval = 0;
	}
#endif

	up_write(&mm->mmap_sem);
	return retval;
}
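
/*
 * Illustration only (not part of the kernel build): a minimal
 * userspace sketch of the segment lifecycle implemented above; error
 * handling is omitted.
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600); // newseg()
 *	char *p = shmat(id, NULL, 0);                         // do_shmat()
 *	p[0] = 42;                                            // faults via shm_fault()
 *	shmdt(p);                                             // shm_close() drops nattch
 *	shmctl(id, IPC_RMID, NULL);                           // shmctl_down()/do_shm_rmid()
 */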

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct shmid_kernel *shp = it;

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	return seq_printf(s,
			  "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
			  "%5lu %5u %5u %5u %5u %10lu %10lu %10lu\n",
			  shp->shm_perm.key,
			  shp->shm_perm.id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  shp->shm_nattch,
			  shp->shm_perm.uid,
			  shp->shm_perm.gid,
			  shp->shm_perm.cuid,
			  shp->shm_perm.cgid,
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim);
}
#endif