/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 * Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"

struct shm_file_data {
        int id;
        struct ipc_namespace *ns;
        struct file *file;
        const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns) ((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp) \
        ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
        ns->shm_ctlmax = SHMMAX;
        ns->shm_ctlall = SHMALL;
        ns->shm_ctlmni = SHMMNI;
        ns->shm_tot = 0;
        ipc_init_ids(&ns->ids[IPC_SHM_IDS]);
}

/*
 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
 * Only shm_ids.rw_mutex remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        if (shp->shm_nattch) {
                shp->shm_perm.mode |= SHM_DEST;
                /* Do not find it any more */
                shp->shm_perm.key = IPC_PRIVATE;
                shm_unlock(shp);
        } else
                shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
        free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
}
#endif

void __init shm_init(void)
{
        shm_init_ns(&init_ipc_ns);
        ipc_init_proc_interface("sysvipc/shm",
                " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime\n",
                IPC_SHM_IDS, sysvipc_shm_proc_show);
}

/*
 * shm_lock_(check_)down routines are called in the paths where the rw_mutex
 * is held to protect access to the idr tree.
 */
static inline struct shmid_kernel *shm_lock_down(struct ipc_namespace *ns,
                                                 int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock_down(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return (struct shmid_kernel *)ipcp;
        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_lock_check_down(
                                                struct ipc_namespace *ns,
                                                int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock_check_down(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return (struct shmid_kernel *)ipcp;
        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rw_mutex
 * is not held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return (struct shmid_kernel *)ipcp;
        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
                                                  int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return (struct shmid_kernel *)ipcp;
        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
        ipc_rmid(&shm_ids(ns), &s->shm_perm);
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;

        shp = shm_lock(sfd->ns, sfd->id);
        BUG_ON(IS_ERR(shp));
        shp->shm_atim = get_seconds();
        shp->shm_lprid = task_tgid_vnr(current);
        shp->shm_nattch++;
        shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
        ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
        shm_rmid(ns, shp);
        shm_unlock(shp);
        if (!is_file_hugepages(shp->shm_file))
                shmem_lock(shp->shm_file, 0, shp->mlock_user);
        else
                user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
                                shp->mlock_user);
        fput(shp->shm_file);
        security_shm_free(shp);
        ipc_rcu_putref(shp);
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;
        struct ipc_namespace *ns = sfd->ns;

        down_write(&shm_ids(ns).rw_mutex);
        /* remove from the list of attaches of the shm segment */
        shp = shm_lock_down(ns, sfd->id);
        BUG_ON(IS_ERR(shp));
        shp->shm_lprid = task_tgid_vnr(current);
        shp->shm_dtim = get_seconds();
        shp->shm_nattch--;
        if (shp->shm_nattch == 0 &&
            shp->shm_perm.mode & SHM_DEST)
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
        up_write(&shm_ids(ns).rw_mutex);
}

static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);

        return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        int err = 0;

        if (sfd->vm_ops->set_policy)
                err = sfd->vm_ops->set_policy(vma, new);
        return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
                                        unsigned long addr)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct mempolicy *pol = NULL;

        if (sfd->vm_ops->get_policy)
                pol = sfd->vm_ops->get_policy(vma, addr);
        else if (vma->vm_policy)
                pol = vma->vm_policy;
        return pol;
}
#endif

static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct shm_file_data *sfd = shm_file_data(file);
        int ret;

        ret = sfd->file->f_op->mmap(sfd->file, vma);
        if (ret != 0)
                return ret;
        sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
        BUG_ON(!sfd->vm_ops->fault);
#endif
        vma->vm_ops = &shm_vm_ops;
        shm_open(vma);
        return ret;
}

static int shm_release(struct inode *ino, struct file *file)
{
        struct shm_file_data *sfd = shm_file_data(file);

        put_ipc_ns(sfd->ns);
        shm_file_data(file) = NULL;
        kfree(sfd);
        return 0;
}

static int shm_fsync(struct file *file, struct dentry *dentry, int datasync)
{
        int (*fsync) (struct file *, struct dentry *, int datasync);
        struct shm_file_data *sfd = shm_file_data(file);
        int ret = -EINVAL;

        fsync = sfd->file->f_op->fsync;
        if (fsync)
                ret = fsync(sfd->file, sfd->file->f_path.dentry, datasync);
        return ret;
}

static unsigned long shm_get_unmapped_area(struct file *file,
        unsigned long addr, unsigned long len, unsigned long pgoff,
        unsigned long flags)
{
        struct shm_file_data *sfd = shm_file_data(file);

        return get_unmapped_area(sfd->file, addr, len, pgoff, flags);
}

int is_file_shm_hugepages(struct file *file)
{
        int ret = 0;

        if (file->f_op == &shm_file_operations) {
                struct shm_file_data *sfd;
                sfd = shm_file_data(file);
                ret = is_file_hugepages(sfd->file);
        }
        return ret;
}

static const struct file_operations shm_file_operations = {
        .mmap               = shm_mmap,
        .fsync              = shm_fsync,
        .release            = shm_release,
        .get_unmapped_area  = shm_get_unmapped_area,
};

static struct vm_operations_struct shm_vm_ops = {
        .open   = shm_open,     /* callback for a new vm-area open */
        .close  = shm_close,    /* callback for when the vm-area is released */
        .fault  = shm_fault,
#if defined(CONFIG_NUMA)
        .set_policy = shm_set_policy,
        .get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rw_mutex held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
        key_t key = params->key;
        int shmflg = params->flg;
        size_t size = params->u.size;
        int error;
        struct shmid_kernel *shp;
        int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        struct file *file;
        char name[13];
        int id;

        if (size < SHMMIN || size > ns->shm_ctlmax)
                return -EINVAL;

        if (ns->shm_tot + numpages > ns->shm_ctlall)
                return -ENOSPC;

        shp = ipc_rcu_alloc(sizeof(*shp));
        if (!shp)
                return -ENOMEM;

        shp->shm_perm.key = key;
        shp->shm_perm.mode = (shmflg & S_IRWXUGO);
        shp->mlock_user = NULL;

        shp->shm_perm.security = NULL;
        error = security_shm_alloc(shp);
        if (error) {
                ipc_rcu_putref(shp);
                return error;
        }

        sprintf(name, "SYSV%08x", key);
        if (shmflg & SHM_HUGETLB) {
                /* hugetlb_file_setup takes care of mlock user accounting */
                file = hugetlb_file_setup(name, size);
                shp->mlock_user = current->user;
        } else {
                int acctflag = VM_ACCOUNT;
                /*
                 * Do not let SHM_NORESERVE disable accounting when
                 * OVERCOMMIT_NEVER is in effect, even if it is asked for.
                 */
                if ((shmflg & SHM_NORESERVE) &&
                    sysctl_overcommit_memory != OVERCOMMIT_NEVER)
                        acctflag = 0;
                file = shmem_file_setup(name, size, acctflag);
        }
        error = PTR_ERR(file);
        if (IS_ERR(file))
                goto no_file;

        id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
        if (id < 0) {
                error = id;
                goto no_id;
        }

        shp->shm_cprid = task_tgid_vnr(current);
        shp->shm_lprid = 0;
        shp->shm_atim = shp->shm_dtim = 0;
        shp->shm_ctim = get_seconds();
        shp->shm_segsz = size;
        shp->shm_nattch = 0;
        shp->shm_file = file;
        /*
         * shmid gets reported as "inode#" in /proc/pid/maps.
         * proc-ps tools use this. Changing this will break them.
         */
        file->f_dentry->d_inode->i_ino = shp->shm_perm.id;

        ns->shm_tot += numpages;
        error = shp->shm_perm.id;
        shm_unlock(shp);
        return error;

no_id:
        fput(file);
no_file:
        security_shm_free(shp);
        ipc_rcu_putref(shp);
        return error;
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
                                  struct ipc_params *params)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        if (shp->shm_segsz < params->u.size)
                return -EINVAL;
        return 0;
}

asmlinkage long sys_shmget(key_t key, size_t size, int shmflg)
{
        struct ipc_namespace *ns;
        struct ipc_ops shm_ops;
        struct ipc_params shm_params;

        ns = current->nsproxy->ipc_ns;

        shm_ops.getnew = newseg;
        shm_ops.associate = shm_security;
        shm_ops.more_checks = shm_more_checks;

        shm_params.key = key;
        shm_params.flg = shmflg;
        shm_params.u.size = size;

        return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}
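
/*
 * Illustrative userspace sketch (editor's addition, not part of this file
 * and never compiled): how sys_shmget() above is normally reached through
 * the glibc wrapper declared in <sys/shm.h>. The key and size below are
 * made-up example values; newseg() rounds the size up to whole pages.
 */
#if 0
#include <sys/ipc.h>
#include <sys/shm.h>

static int create_example_segment(void)
{
        /* 64 KiB segment, owner read/write only, created only if no
         * segment with this key exists yet */
        return shmget((key_t)0x1234, 64 * 1024, IPC_CREAT | IPC_EXCL | 0600);
}
#endif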

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
        {
                struct shmid_ds out;

                ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
                out.shm_segsz = in->shm_segsz;
                out.shm_atime = in->shm_atime;
                out.shm_dtime = in->shm_dtime;
                out.shm_ctime = in->shm_ctime;
                out.shm_cpid = in->shm_cpid;
                out.shm_lpid = in->shm_lpid;
                out.shm_nattch = in->shm_nattch;

                return copy_to_user(buf, &out, sizeof(out));
        }
        default:
                return -EINVAL;
        }
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
        switch (version) {
        case IPC_64:
                if (copy_from_user(out, buf, sizeof(*out)))
                        return -EFAULT;
                return 0;
        case IPC_OLD:
        {
                struct shmid_ds tbuf_old;

                if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
                        return -EFAULT;

                out->shm_perm.uid = tbuf_old.shm_perm.uid;
                out->shm_perm.gid = tbuf_old.shm_perm.gid;
                out->shm_perm.mode = tbuf_old.shm_perm.mode;

                return 0;
        }
        default:
                return -EINVAL;
        }
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
        {
                struct shminfo out;

                if (in->shmmax > INT_MAX)
                        out.shmmax = INT_MAX;
                else
                        out.shmmax = (int)in->shmmax;

                out.shmmin = in->shmmin;
                out.shmmni = in->shmmni;
                out.shmseg = in->shmseg;
                out.shmall = in->shmall;

                return copy_to_user(buf, &out, sizeof(out));
        }
        default:
                return -EINVAL;
        }
}

/*
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
                         unsigned long *swp)
{
        int next_id;
        int total, in_use;

        *rss = 0;
        *swp = 0;

        in_use = shm_ids(ns).in_use;

        for (total = 0, next_id = 0; total < in_use; next_id++) {
                struct shmid_kernel *shp;
                struct inode *inode;

                shp = idr_find(&shm_ids(ns).ipcs_idr, next_id);
                if (shp == NULL)
                        continue;

                inode = shp->shm_file->f_path.dentry->d_inode;

                if (is_file_hugepages(shp->shm_file)) {
                        struct address_space *mapping = inode->i_mapping;
                        *rss += (HPAGE_SIZE/PAGE_SIZE)*mapping->nrpages;
                } else {
                        struct shmem_inode_info *info = SHMEM_I(inode);
                        spin_lock(&info->lock);
                        *rss += inode->i_mapping->nrpages;
                        *swp += info->swapped;
                        spin_unlock(&info->lock);
                }

                total++;
        }
}

/*
 * This function handles some shmctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
                       struct shmid_ds __user *buf, int version)
{
        struct kern_ipc_perm *ipcp;
        struct shmid64_ds shmid64;
        struct shmid_kernel *shp;
        int err;

        if (cmd == IPC_SET) {
                if (copy_shmid_from_user(&shmid64, buf, version))
                        return -EFAULT;
        }

        down_write(&shm_ids(ns).rw_mutex);
        shp = shm_lock_check_down(ns, shmid);
        if (IS_ERR(shp)) {
                err = PTR_ERR(shp);
                goto out_up;
        }

        ipcp = &shp->shm_perm;

        err = audit_ipc_obj(ipcp);
        if (err)
                goto out_unlock;

        if (cmd == IPC_SET) {
                err = audit_ipc_set_perm(0, shmid64.shm_perm.uid,
                                         shmid64.shm_perm.gid,
                                         shmid64.shm_perm.mode);
                if (err)
                        goto out_unlock;
        }

        if (current->euid != ipcp->uid &&
            current->euid != ipcp->cuid &&
            !capable(CAP_SYS_ADMIN)) {
                err = -EPERM;
                goto out_unlock;
        }

        err = security_shm_shmctl(shp, cmd);
        if (err)
                goto out_unlock;

        switch (cmd) {
        case IPC_RMID:
                do_shm_rmid(ns, ipcp);
                goto out_up;
        case IPC_SET:
                ipcp->uid = shmid64.shm_perm.uid;
                ipcp->gid = shmid64.shm_perm.gid;
                ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
                             | (shmid64.shm_perm.mode & S_IRWXUGO);
                shp->shm_ctim = get_seconds();
                break;
        default:
                err = -EINVAL;
        }

out_unlock:
        shm_unlock(shp);
out_up:
        up_write(&shm_ids(ns).rw_mutex);
        return err;
}

asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
{
        struct shmid_kernel *shp;
        int err, version;
        struct ipc_namespace *ns;

        if (cmd < 0 || shmid < 0) {
                err = -EINVAL;
                goto out;
        }

        version = ipc_parse_version(&cmd);
        ns = current->nsproxy->ipc_ns;

        switch (cmd) { /* replace with proc interface ? */
        case IPC_INFO:
        {
                struct shminfo64 shminfo;

                err = security_shm_shmctl(NULL, cmd);
                if (err)
                        return err;

                memset(&shminfo, 0, sizeof(shminfo));
                shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
                shminfo.shmmax = ns->shm_ctlmax;
                shminfo.shmall = ns->shm_ctlall;

                shminfo.shmmin = SHMMIN;
                if (copy_shminfo_to_user(buf, &shminfo, version))
                        return -EFAULT;

                down_read(&shm_ids(ns).rw_mutex);
                err = ipc_get_maxid(&shm_ids(ns));
                up_read(&shm_ids(ns).rw_mutex);

                if (err < 0)
                        err = 0;
                goto out;
        }
        case SHM_INFO:
        {
                struct shm_info shm_info;

                err = security_shm_shmctl(NULL, cmd);
                if (err)
                        return err;

                memset(&shm_info, 0, sizeof(shm_info));
                down_read(&shm_ids(ns).rw_mutex);
                shm_info.used_ids = shm_ids(ns).in_use;
                shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
                shm_info.shm_tot = ns->shm_tot;
                shm_info.swap_attempts = 0;
                shm_info.swap_successes = 0;
                err = ipc_get_maxid(&shm_ids(ns));
                up_read(&shm_ids(ns).rw_mutex);
                if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
                        err = -EFAULT;
                        goto out;
                }

                err = err < 0 ? 0 : err;
                goto out;
        }
        case SHM_STAT:
        case IPC_STAT:
        {
                struct shmid64_ds tbuf;
                int result;

                if (!buf) {
                        err = -EFAULT;
                        goto out;
                }

                if (cmd == SHM_STAT) {
                        shp = shm_lock(ns, shmid);
                        if (IS_ERR(shp)) {
                                err = PTR_ERR(shp);
                                goto out;
                        }
                        result = shp->shm_perm.id;
                } else {
                        shp = shm_lock_check(ns, shmid);
                        if (IS_ERR(shp)) {
                                err = PTR_ERR(shp);
                                goto out;
                        }
                        result = 0;
                }
                err = -EACCES;
                if (ipcperms(&shp->shm_perm, S_IRUGO))
                        goto out_unlock;
                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock;
                memset(&tbuf, 0, sizeof(tbuf));
                kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
                tbuf.shm_segsz = shp->shm_segsz;
                tbuf.shm_atime = shp->shm_atim;
                tbuf.shm_dtime = shp->shm_dtim;
                tbuf.shm_ctime = shp->shm_ctim;
                tbuf.shm_cpid = shp->shm_cprid;
                tbuf.shm_lpid = shp->shm_lprid;
                tbuf.shm_nattch = shp->shm_nattch;
                shm_unlock(shp);
                if (copy_shmid_to_user(buf, &tbuf, version))
                        err = -EFAULT;
                else
                        err = result;
                goto out;
        }
        case SHM_LOCK:
        case SHM_UNLOCK:
        {
                shp = shm_lock_check(ns, shmid);
                if (IS_ERR(shp)) {
                        err = PTR_ERR(shp);
                        goto out;
                }

                err = audit_ipc_obj(&(shp->shm_perm));
                if (err)
                        goto out_unlock;

                if (!capable(CAP_IPC_LOCK)) {
                        err = -EPERM;
                        if (current->euid != shp->shm_perm.uid &&
                            current->euid != shp->shm_perm.cuid)
                                goto out_unlock;
                        if (cmd == SHM_LOCK &&
                            !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
                                goto out_unlock;
                }

                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock;

                if (cmd == SHM_LOCK) {
                        struct user_struct *user = current->user;
                        if (!is_file_hugepages(shp->shm_file)) {
                                err = shmem_lock(shp->shm_file, 1, user);
                                if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
                                        shp->shm_perm.mode |= SHM_LOCKED;
                                        shp->mlock_user = user;
                                }
                        }
                } else if (!is_file_hugepages(shp->shm_file)) {
                        shmem_lock(shp->shm_file, 0, shp->mlock_user);
                        shp->shm_perm.mode &= ~SHM_LOCKED;
                        shp->mlock_user = NULL;
                }
                shm_unlock(shp);
                goto out;
        }
        case IPC_RMID:
        case IPC_SET:
                err = shmctl_down(ns, shmid, cmd, buf, version);
                return err;
        default:
                return -EINVAL;
        }

out_unlock:
        shm_unlock(shp);
out:
        return err;
}
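
/*
 * Illustrative userspace sketch (editor's addition, not part of this file
 * and never compiled): the IPC_STAT and IPC_RMID paths of sys_shmctl()
 * above as seen through the glibc wrapper. The shmid is assumed to come
 * from an earlier shmget() call.
 */
#if 0
#include <sys/ipc.h>
#include <sys/shm.h>

static int stat_and_remove(int shmid)
{
        struct shmid_ds ds;

        /* filled from the shmid64_ds copied out in the IPC_STAT case */
        if (shmctl(shmid, IPC_STAT, &ds) == -1)
                return -1;

        /* marks the segment SHM_DEST (or destroys it at once if nattch is
         * already zero), as done by do_shm_rmid() above */
        return shmctl(shmid, IPC_RMID, NULL);
}
#endif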

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
        struct shmid_kernel *shp;
        unsigned long addr;
        unsigned long size;
        struct file *file;
        int err;
        unsigned long flags;
        unsigned long prot;
        int acc_mode;
        unsigned long user_addr;
        struct ipc_namespace *ns;
        struct shm_file_data *sfd;
        struct path path;
        mode_t f_mode;

        err = -EINVAL;
        if (shmid < 0)
                goto out;
        else if ((addr = (ulong)shmaddr)) {
                if (addr & (SHMLBA-1)) {
                        if (shmflg & SHM_RND)
                                addr &= ~(SHMLBA-1);    /* round down */
                        else
#ifndef __ARCH_FORCE_SHMLBA
                                if (addr & ~PAGE_MASK)
#endif
                                        goto out;
                }
                flags = MAP_SHARED | MAP_FIXED;
        } else {
                if ((shmflg & SHM_REMAP))
                        goto out;

                flags = MAP_SHARED;
        }

        if (shmflg & SHM_RDONLY) {
                prot = PROT_READ;
                acc_mode = S_IRUGO;
                f_mode = FMODE_READ;
        } else {
                prot = PROT_READ | PROT_WRITE;
                acc_mode = S_IRUGO | S_IWUGO;
                f_mode = FMODE_READ | FMODE_WRITE;
        }
        if (shmflg & SHM_EXEC) {
                prot |= PROT_EXEC;
                acc_mode |= S_IXUGO;
        }

        /*
         * We cannot rely on the fs check since SYSV IPC does have an
         * additional creator id...
         */
        ns = current->nsproxy->ipc_ns;
        shp = shm_lock_check(ns, shmid);
        if (IS_ERR(shp)) {
                err = PTR_ERR(shp);
                goto out;
        }

        err = -EACCES;
        if (ipcperms(&shp->shm_perm, acc_mode))
                goto out_unlock;

        err = security_shm_shmat(shp, shmaddr, shmflg);
        if (err)
                goto out_unlock;

        path.dentry = dget(shp->shm_file->f_path.dentry);
        path.mnt = shp->shm_file->f_path.mnt;
        shp->shm_nattch++;
        size = i_size_read(path.dentry->d_inode);
        shm_unlock(shp);

        err = -ENOMEM;
        sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
        if (!sfd)
                goto out_put_dentry;

        err = -ENOMEM;

        file = alloc_file(path.mnt, path.dentry, f_mode, &shm_file_operations);
        if (!file)
                goto out_free;

        file->private_data = sfd;
        file->f_mapping = shp->shm_file->f_mapping;
        sfd->id = shp->shm_perm.id;
        sfd->ns = get_ipc_ns(ns);
        sfd->file = shp->shm_file;
        sfd->vm_ops = NULL;

        down_write(&current->mm->mmap_sem);
        if (addr && !(shmflg & SHM_REMAP)) {
                err = -EINVAL;
                if (find_vma_intersection(current->mm, addr, addr + size))
                        goto invalid;
                /*
                 * If shm segment goes below stack, make sure there is some
                 * space left for the stack to grow (at least 4 pages).
                 */
                if (addr < current->mm->start_stack &&
                    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
                        goto invalid;
        }

        user_addr = do_mmap(file, addr, size, prot, flags, 0);
        *raddr = user_addr;
        err = 0;
        if (IS_ERR_VALUE(user_addr))
                err = (long)user_addr;
invalid:
        up_write(&current->mm->mmap_sem);

        fput(file);

out_nattch:
        down_write(&shm_ids(ns).rw_mutex);
        shp = shm_lock_down(ns, shmid);
        BUG_ON(IS_ERR(shp));
        shp->shm_nattch--;
        if (shp->shm_nattch == 0 &&
            shp->shm_perm.mode & SHM_DEST)
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
        up_write(&shm_ids(ns).rw_mutex);

out:
        return err;

out_unlock:
        shm_unlock(shp);
        goto out;

out_free:
        kfree(sfd);
out_put_dentry:
        dput(path.dentry);
        goto out_nattch;
}

asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg)
{
        unsigned long ret;
        long err;

        err = do_shmat(shmid, shmaddr, shmflg, &ret);
        if (err)
                return err;
        force_successful_syscall_return();
        return (long)ret;
}
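
/*
 * Illustrative userspace sketch (editor's addition, not part of this file
 * and never compiled): attaching a segment through the glibc shmat()
 * wrapper. A NULL address lets do_shmat() pick one via get_unmapped_area();
 * SHM_RDONLY maps the segment PROT_READ only, matching the f_mode/acc_mode
 * selection above.
 */
#if 0
#include <sys/shm.h>

static const void *attach_read_only(int shmid)
{
        void *p = shmat(shmid, NULL, SHM_RDONLY);

        /* shmat() returns (void *)-1 on failure */
        return (p == (void *)-1) ? NULL : p;
}
#endif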

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
asmlinkage long sys_shmdt(char __user *shmaddr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *next;
        unsigned long addr = (unsigned long)shmaddr;
        loff_t size = 0;
        int retval = -EINVAL;

        if (addr & ~PAGE_MASK)
                return retval;

        down_write(&mm->mmap_sem);

        /*
         * This function tries to be smart and unmap shm segments that
         * were modified by partial mlock or munmap calls:
         * - It first determines the size of the shm segment that should be
         *   unmapped: It searches for a vma that is backed by shm and that
         *   started at address shmaddr. It records its size and then unmaps
         *   it.
         * - Then it unmaps all shm vmas that started at shmaddr and that
         *   are within the initially determined size.
         * Errors from do_munmap are ignored: the function only fails if
         * it's called with invalid parameters or if it's called to unmap
         * a part of a vma. Both calls in this function are for full vmas,
         * the parameters are directly copied from the vma itself and always
         * valid - therefore do_munmap cannot fail. (famous last words?)
         */
        /*
         * If it had been mremap()'d, the starting address would not
         * match the usual checks anyway. So assume all vma's are
         * above the starting address given.
         */
        vma = find_vma(mm, addr);

        while (vma) {
                next = vma->vm_next;

                /*
                 * Check if the starting address would match, i.e. it's
                 * a fragment created by mprotect() and/or munmap(), or it
                 * otherwise starts at this address with no hassles.
                 */
                if ((vma->vm_ops == &shm_vm_ops) &&
                    (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

                        size = vma->vm_file->f_path.dentry->d_inode->i_size;
                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                        /*
                         * We discovered the size of the shm segment, so
                         * break out of here and fall through to the next
                         * loop that uses the size information to stop
                         * searching for matching vma's.
                         */
                        retval = 0;
                        vma = next;
                        break;
                }
                vma = next;
        }

        /*
         * We need look no further than the maximum address a fragment
         * could possibly have landed at. Also cast things to loff_t to
         * prevent overflows and make comparisons vs. equal-width types.
         */
        size = PAGE_ALIGN(size);
        while (vma && (loff_t)(vma->vm_end - addr) <= size) {
                next = vma->vm_next;

                /* finding a matching vma now does not alter retval */
                if ((vma->vm_ops == &shm_vm_ops) &&
                    (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                vma = next;
        }

        up_write(&mm->mmap_sem);
        return retval;
}
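
/*
 * Illustrative userspace sketch (editor's addition, not part of this file
 * and never compiled): detaching a mapping with the glibc shmdt() wrapper.
 * The address must be the page-aligned value returned by shmat();
 * sys_shmdt() above then walks the vma list to unmap every fragment of
 * that attach.
 */
#if 0
#include <sys/shm.h>

static int detach_example(void *addr)
{
        /* returns 0 on success, or -1 with errno set to EINVAL if addr
         * is not the start of an attached SysV shm segment */
        return shmdt(addr);
}
#endif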

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
        struct shmid_kernel *shp = it;
        char *format;

#define SMALL_STRING "%10d %10d %4o %10u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
#define BIG_STRING   "%10d %10d %4o %21u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"

        if (sizeof(size_t) <= sizeof(int))
                format = SMALL_STRING;
        else
                format = BIG_STRING;
        return seq_printf(s, format,
                          shp->shm_perm.key,
                          shp->shm_perm.id,
                          shp->shm_perm.mode,
                          shp->shm_segsz,
                          shp->shm_cprid,
                          shp->shm_lprid,
                          shp->shm_nattch,
                          shp->shm_perm.uid,
                          shp->shm_perm.gid,
                          shp->shm_perm.cuid,
                          shp->shm_perm.cgid,
                          shp->shm_atim,
                          shp->shm_dtim,
                          shp->shm_ctim);
}
#endif