/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"

struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
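
/*
 * Note that shm_file_data() expands to an lvalue: it reinterprets the
 * storage of file->private_data as a struct shm_file_data pointer, so the
 * same macro serves both reads and assignments such as
 * "shm_file_data(file) = NULL" in shm_release() below.
 */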

static const struct file_operations shm_file_operations;
static struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)
#define shm_buildid(id, seq)	ipc_buildid(id, seq)
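
/*
 * shm_buildid() folds the per-slot sequence number into the idr slot
 * index to produce the user-visible shmid, so a stale id that happens to
 * name a recycled slot is rejected instead of silently matching a new
 * segment.
 */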

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_tot = 0;
	ipc_init_ids(&ns->ids[IPC_SHM_IDS]);
}

/*
 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
 * Only shm_ids.rw_mutex remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
}
#endif

void __init shm_init(void)
{
	shm_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/shm",
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime\n",
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}

/*
 * shm_lock_(check_)down routines are called in the paths where the rw_mutex
 * is held to protect access to the idr tree.
 */
static inline struct shmid_kernel *shm_lock_down(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_down(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_lock_check_down(
						struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check_down(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rw_mutex
 * is not held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}

static inline int shm_addid(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else
		user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
						shp->mlock_user);
	fput(shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rw_mutex);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock_down(ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);
}

static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;

	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif
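
/*
 * The mmap path below interposes on the backing shmem (or hugetlbfs)
 * file: shm_mmap() lets the backing file's ->mmap install its vm_ops,
 * saves that pointer in the per-attach shm_file_data, and then replaces
 * vma->vm_ops with shm_vm_ops, so that open/close/fault (and the NUMA
 * policy hooks) can update SysV attach accounting before delegating to
 * the saved operations.
 */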

static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	BUG_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);

	return ret;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	int (*fsync) (struct file *, struct dentry *, int datasync);
	struct shm_file_data *sfd = shm_file_data(file);
	int ret = -EINVAL;

	fsync = sfd->file->f_op->fsync;
	if (fsync)
		ret = fsync(sfd->file, sfd->file->f_path.dentry, datasync);
	return ret;
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);

	return get_unmapped_area(sfd->file, addr, len, pgoff, flags);
}

int is_file_shm_hugepages(struct file *file)
{
	int ret = 0;

	if (file->f_op == &shm_file_operations) {
		struct shm_file_data *sfd;
		sfd = shm_file_data(file);
		ret = is_file_hugepages(sfd->file);
	}
	return ret;
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
};

static struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rw_mutex held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		/* hugetlb_file_setup takes care of mlock user accounting */
		file = hugetlb_file_setup(name, size);
		shp->mlock_user = current->user;
	} else {
		int acctflag = VM_ACCOUNT;
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = 0;
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	id = shm_addid(ns, shp);
	if (id < 0) {
		error = id;
		goto no_id;
	}

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_perm.id = shm_buildid(id, shp->shm_perm.seq);
	shp->shm_file = file;
	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file->f_dentry->d_inode->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;
	shm_unlock(shp);
	return error;

no_id:
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

asmlinkage long sys_shmget(key_t key, size_t size, int shmflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops shm_ops;
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_ops.getnew = newseg;
	shm_ops.associate = shm_security;
	shm_ops.more_checks = shm_more_checks;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}
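
/*
 * Illustrative userspace lifecycle for the syscalls implemented in this
 * file (a minimal sketch; userspace code, not part of the kernel source):
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);	    -- attaches via do_shmat() below
 *	p[0] = 'x';
 *	shmdt(p);			    -- sys_shmdt() unmaps the vma
 *	shmctl(id, IPC_RMID, NULL);	    -- freed once shm_nattch drops to 0
 */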

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct shmid_ds out;

		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

struct shm_setbuf {
	uid_t	uid;
	gid_t	gid;
	mode_t	mode;
};

static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
	{
		struct shmid64_ds tbuf;

		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->uid	= tbuf.shm_perm.uid;
		out->gid	= tbuf.shm_perm.gid;
		out->mode	= tbuf.shm_perm.mode;

		return 0;
	}
	case IPC_OLD:
	{
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid	= tbuf_old.shm_perm.uid;
		out->gid	= tbuf_old.shm_perm.gid;
		out->mode	= tbuf_old.shm_perm.mode;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

/*
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct shmid_kernel *shp;
		struct inode *inode;

		shp = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (shp == NULL)
			continue;

		inode = shp->shm_file->f_path.dentry->d_inode;

		if (is_file_hugepages(shp->shm_file)) {
			struct address_space *mapping = inode->i_mapping;
			*rss += (HPAGE_SIZE/PAGE_SIZE)*mapping->nrpages;
		} else {
			struct shmem_inode_info *info = SHMEM_I(inode);
			spin_lock(&info->lock);
			*rss += inode->i_mapping->nrpages;
			*swp += info->swapped;
			spin_unlock(&info->lock);
		}

		total++;
	}
}
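
/*
 * shm_get_stat() above reports RSS and swap totals in units of base
 * pages: for hugetlbfs-backed segments each entry in mapping->nrpages is
 * a huge page, hence the HPAGE_SIZE/PAGE_SIZE scaling, while shmem
 * segments contribute their nrpages directly plus the swapped-out count
 * kept in shmem_inode_info. SHM_INFO below surfaces these sums to
 * userspace.
 */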

asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
{
	struct shm_setbuf setbuf;
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rw_mutex);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);

		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rw_mutex);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		if (!buf) {
			err = -EFAULT;
			goto out;
		}

		if (cmd == SHM_STAT) {
			shp = shm_lock(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_lock_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = 0;
		}
		err = -EACCES;
		if (ipcperms(&shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;
		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		shm_unlock(shp);
		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		shp = shm_lock_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out;
		}

		err = audit_ipc_obj(&(shp->shm_perm));
		if (err)
			goto out_unlock;

		if (!capable(CAP_IPC_LOCK)) {
			err = -EPERM;
			if (current->euid != shp->shm_perm.uid &&
			    current->euid != shp->shm_perm.cuid)
				goto out_unlock;
			if (cmd == SHM_LOCK &&
			    !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current->user;
			if (!is_file_hugepages(shp->shm_file)) {
				err = shmem_lock(shp->shm_file, 1, user);
				if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
					shp->shm_perm.mode |= SHM_LOCKED;
					shp->mlock_user = user;
				}
			}
		} else if (!is_file_hugepages(shp->shm_file)) {
			shmem_lock(shp->shm_file, 0, shp->mlock_user);
			shp->shm_perm.mode &= ~SHM_LOCKED;
			shp->mlock_user = NULL;
		}
		shm_unlock(shp);
		goto out;
	}
	case IPC_RMID:
	{
		/*
		 * We cannot simply remove the file. The SVID states
		 * that the block remains until the last person
		 * detaches from it, then is deleted. A shmat() on
		 * an RMID segment is legal in older Linux and if
		 * we change it apps break...
		 *
		 * Instead we set a destroyed flag, and then blow
		 * the name away when the usage hits zero.
		 */
		down_write(&shm_ids(ns).rw_mutex);
		shp = shm_lock_check_down(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_up;
		}

		err = audit_ipc_obj(&(shp->shm_perm));
		if (err)
			goto out_unlock_up;

		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			err = -EPERM;
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		do_shm_rmid(ns, &shp->shm_perm);
		up_write(&shm_ids(ns).rw_mutex);
		goto out;
	}
	case IPC_SET:
	{
		if (!buf) {
			err = -EFAULT;
			goto out;
		}

		if (copy_shmid_from_user(&setbuf, buf, version)) {
			err = -EFAULT;
			goto out;
		}
		down_write(&shm_ids(ns).rw_mutex);
		shp = shm_lock_check_down(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_up;
		}
		err = audit_ipc_obj(&(shp->shm_perm));
		if (err)
			goto out_unlock_up;
		err = audit_ipc_set_perm(0, setbuf.uid, setbuf.gid, setbuf.mode);
		if (err)
			goto out_unlock_up;
		err = -EPERM;
		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		shp->shm_perm.uid = setbuf.uid;
		shp->shm_perm.gid = setbuf.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~S_IRWXUGO)
			| (setbuf.mode & S_IRWXUGO);
		shp->shm_ctim = get_seconds();
		break;
	}

	default:
		err = -EINVAL;
		goto out;
	}

	err = 0;
out_unlock_up:
	shm_unlock(shp);
out_up:
	up_write(&shm_ids(ns).rw_mutex);
	goto out;
out_unlock:
	shm_unlock(shp);
out:
	return err;
}
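
/*
 * Illustrative shmctl() usage from userspace (a minimal sketch, not part
 * of this file): querying a segment and adjusting its permission bits,
 * which exercises the IPC_STAT and IPC_SET branches above.
 *
 *	struct shmid_ds ds;
 *	if (shmctl(id, IPC_STAT, &ds) == 0) {
 *		ds.shm_perm.mode = 0600;
 *		shmctl(id, IPC_SET, &ds);   -- updates uid/gid/mode, shm_ctime
 *	}
 */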

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file *file;
	int err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	unsigned long user_addr;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	mode_t f_mode;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA-1)) {
			if (shmflg & SHM_RND)
				addr &= ~(SHMLBA-1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	shp = shm_lock_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out;
	}

	err = -EACCES;
	if (ipcperms(&shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	path.dentry = dget(shp->shm_file->f_path.dentry);
	path.mnt    = shp->shm_file->f_path.mnt;
	shp->shm_nattch++;
	size = i_size_read(path.dentry->d_inode);
	shm_unlock(shp);

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd)
		goto out_put_dentry;

	err = -ENOMEM;

	file = alloc_file(path.mnt, path.dentry, f_mode, &shm_file_operations);
	if (!file)
		goto out_free;

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = do_mmap(file, addr, size, prot, flags, 0);
	*raddr = user_addr;
	err = 0;
	if (IS_ERR_VALUE(user_addr))
		err = (long)user_addr;
invalid:
	up_write(&current->mm->mmap_sem);

	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rw_mutex);
	shp = shm_lock_down(ns, shmid);
	BUG_ON(IS_ERR(shp));
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);

out:
	return err;

out_unlock:
	shm_unlock(shp);
	goto out;

out_free:
	kfree(sfd);
out_put_dentry:
	dput(path.dentry);
	goto out_nattch;
}

asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}
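
/*
 * Illustrative shmat() flag combinations from userspace (a minimal
 * sketch, not part of this file). SHM_RND makes do_shmat() round a
 * misaligned hint down to an SHMLBA boundary; SHM_RDONLY selects the
 * PROT_READ/S_IRUGO/FMODE_READ triple instead of the read-write one.
 *
 *	void *ro  = shmat(id, NULL, SHM_RDONLY);
 *	void *fix = shmat(id, hint_addr, SHM_RND);
 */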

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
asmlinkage long sys_shmdt(char __user *shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *next;
	unsigned long addr = (unsigned long)shmaddr;
	loff_t size = 0;
	int retval = -EINVAL;

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or
		 * otherwise it starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			size = vma->vm_file->f_path.dentry->d_inode->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

	up_write(&mm->mmap_sem);
	return retval;
}
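
/*
 * A worked example of the two-pass scan above (an illustrative scenario,
 * not from the original source): if a 3-page segment attached at address
 * A was later split by mprotect() into vmas [A, A+4K) and [A+4K, A+12K),
 * the first loop matches the vma at A (vm_pgoff 0), records the 12K
 * inode size, and unmaps it; the second loop then unmaps the remaining
 * fragment, because its vm_pgoff (1) still lines up with its distance
 * from A and its end lies within the recorded size.
 */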

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct shmid_kernel *shp = it;
	char *format;

#define SMALL_STRING "%10d %10d  %4o %10u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
#define BIG_STRING   "%10d %10d  %4o %21u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"

	if (sizeof(size_t) <= sizeof(int))
		format = SMALL_STRING;
	else
		format = BIG_STRING;
	return seq_printf(s, format,
			  shp->shm_perm.key,
			  shp->shm_perm.id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  shp->shm_nattch,
			  shp->shm_perm.uid,
			  shp->shm_perm.gid,
			  shp->shm_perm.cuid,
			  shp->shm_perm.cgid,
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim);
}
#endif