/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 * Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>

#include <asm/uaccess.h>

#include "util.h"

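/*
 * One shm_file_data is allocated per shmat() attach: each attach gets its own
 * struct file (private_data points here), and the file/vm operations below
 * forward mmap, fsync and page faults to the backing shmem or hugetlbfs file
 * kept in ->file.
 */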
struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static struct vm_operations_struct shm_vm_ops;

static struct ipc_ids init_shm_ids;

#define shm_ids(ns)	(*((ns)->ids[IPC_SHM_IDS]))

#define shm_lock(ns, id)		\
	((struct shmid_kernel *)ipc_lock(&shm_ids(ns), id))
#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)
#define shm_get(ns, id)			\
	((struct shmid_kernel *)ipc_get(&shm_ids(ns), id))
#define shm_buildid(ns, id, seq)	\
	ipc_buildid(&shm_ids(ns), id, seq)

static int newseg(struct ipc_namespace *ns, key_t key,
		  int shmflg, size_t size);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

static void __shm_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids)
{
	ns->ids[IPC_SHM_IDS] = ids;
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_tot = 0;
	ipc_init_ids(ids, 1);
}

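/*
 * Mark a segment for removal.  Called with shp locked and shm_ids(ns).mutex
 * held: if there are still attaches, only flag the segment SHM_DEST and make
 * its key unfindable; otherwise tear it down immediately via shm_destroy().
 */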
static void do_shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

int shm_init_ns(struct ipc_namespace *ns)
{
	struct ipc_ids *ids;

	ids = kmalloc(sizeof(struct ipc_ids), GFP_KERNEL);
	if (ids == NULL)
		return -ENOMEM;

	__shm_init_ns(ns, ids);
	return 0;
}

void shm_exit_ns(struct ipc_namespace *ns)
{
	int i;
	struct shmid_kernel *shp;

	mutex_lock(&shm_ids(ns).mutex);
	for (i = 0; i <= shm_ids(ns).max_id; i++) {
		shp = shm_lock(ns, i);
		if (shp == NULL)
			continue;

		do_shm_rmid(ns, shp);
	}
	mutex_unlock(&shm_ids(ns).mutex);

	ipc_fini_ids(ns->ids[IPC_SHM_IDS]);
	kfree(ns->ids[IPC_SHM_IDS]);
	ns->ids[IPC_SHM_IDS] = NULL;
}

void __init shm_init(void)
{
	__shm_init_ns(&init_ipc_ns, &init_shm_ids);
	ipc_init_proc_interface("sysvipc/shm",
				" key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime\n",
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}

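/*
 * The shmid handed to userspace combines the slot index with a sequence
 * number (see shm_buildid/ipc_buildid), so shm_checkid() can reject ids
 * that refer to a slot which has since been reused.
 */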
static inline int shm_checkid(struct ipc_namespace *ns,
			      struct shmid_kernel *s, int id)
{
	if (ipc_checkid(&shm_ids(ns), &s->shm_perm, id))
		return -EIDRM;
	return 0;
}

static inline struct shmid_kernel *shm_rmid(struct ipc_namespace *ns, int id)
{
	return (struct shmid_kernel *)ipc_rmid(&shm_ids(ns), id);
}

static inline int shm_addid(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(!shp);
	shp->shm_atim = get_seconds();
	shp->shm_lprid = current->tgid;
	shp->shm_nattch++;
	shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.mutex locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp->id);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else
		user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
				shp->mlock_user);
	fput(shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	mutex_lock(&shm_ids(ns).mutex);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	BUG_ON(!shp);
	shp->shm_lprid = current->tgid;
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	mutex_unlock(&shm_ids(ns).mutex);
}

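/* Page faults on the attach are simply forwarded to the backing file's vm_ops. */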
static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;

	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

struct mempolicy *shm_get_policy(struct vm_area_struct *vma, unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;
	else
		pol = current->mempolicy;
	return pol;
}
#endif

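/*
 * mmap of the attach file: let the backing shmem/hugetlbfs file set up the
 * mapping, then substitute shm_vm_ops so that attach/detach accounting runs
 * through shm_open()/shm_close() while faults are forwarded to the saved
 * vm_ops.
 */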
static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	BUG_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);

	return ret;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	int (*fsync) (struct file *, struct dentry *, int datasync);
	struct shm_file_data *sfd = shm_file_data(file);
	int ret = -EINVAL;

	fsync = sfd->file->f_op->fsync;
	if (fsync)
		ret = fsync(sfd->file, sfd->file->f_path.dentry, datasync);
	return ret;
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);

	return get_unmapped_area(sfd->file, addr, len, pgoff, flags);
}

int is_file_shm_hugepages(struct file *file)
{
	int ret = 0;

	if (file->f_op == &shm_file_operations) {
		struct shm_file_data *sfd;
		sfd = shm_file_data(file);
		ret = is_file_hugepages(sfd->file);
	}
	return ret;
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area = shm_get_unmapped_area,
};

static struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

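/*
 * Create a new shared memory segment.  Called with shm_ids(ns).mutex held;
 * returns the new shmid or a negative errno.
 */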
static int newseg(struct ipc_namespace *ns, key_t key, int shmflg, size_t size)
{
	int error;
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		/* hugetlb_file_setup takes care of mlock user accounting */
		file = hugetlb_file_setup(name, size);
		shp->mlock_user = current->user;
	} else {
		int acctflag = VM_ACCOUNT;
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
		    sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = 0;
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	error = -ENOSPC;
	id = shm_addid(ns, shp);
	if (id == -1)
		goto no_id;

	shp->shm_cprid = current->tgid;
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->id = shm_buildid(ns, id, shp->shm_perm.seq);
	shp->shm_file = file;
	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file->f_dentry->d_inode->i_ino = shp->id;

	ns->shm_tot += numpages;
	shm_unlock(shp);
	return shp->id;

no_id:
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}

asmlinkage long sys_shmget(key_t key, size_t size, int shmflg)
{
	struct shmid_kernel *shp;
	int err, id = 0;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	mutex_lock(&shm_ids(ns).mutex);
	if (key == IPC_PRIVATE) {
		err = newseg(ns, key, shmflg, size);
	} else if ((id = ipc_findkey(&shm_ids(ns), key)) == -1) {
		if (!(shmflg & IPC_CREAT))
			err = -ENOENT;
		else
			err = newseg(ns, key, shmflg, size);
	} else if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL)) {
		err = -EEXIST;
	} else {
		shp = shm_lock(ns, id);
		BUG_ON(shp == NULL);
		if (shp->shm_segsz < size)
			err = -EINVAL;
		else if (ipcperms(&shp->shm_perm, shmflg))
			err = -EACCES;
		else {
			int shmid = shm_buildid(ns, id, shp->shm_perm.seq);
			err = security_shm_associate(shp, shmflg);
			if (!err)
				err = shmid;
		}
		shm_unlock(shp);
	}
	mutex_unlock(&shm_ids(ns).mutex);

	return err;
}

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

struct shm_setbuf {
	uid_t	uid;
	gid_t	gid;
	mode_t	mode;
};

static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
	    {
		struct shmid64_ds tbuf;

		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->uid	= tbuf.shm_perm.uid;
		out->gid	= tbuf.shm_perm.gid;
		out->mode	= tbuf.shm_perm.mode;

		return 0;
	    }
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid	= tbuf_old.shm_perm.uid;
		out->gid	= tbuf_old.shm_perm.gid;
		out->mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

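/*
 * Sum resident and swapped-out pages over all segments for SHM_INFO.
 * Called with shm_ids(ns).mutex held.
 */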
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
			 unsigned long *swp)
{
	int i;

	*rss = 0;
	*swp = 0;

	for (i = 0; i <= shm_ids(ns).max_id; i++) {
		struct shmid_kernel *shp;
		struct inode *inode;

		shp = shm_get(ns, i);
		if (!shp)
			continue;

		inode = shp->shm_file->f_path.dentry->d_inode;

		if (is_file_hugepages(shp->shm_file)) {
			struct address_space *mapping = inode->i_mapping;
			*rss += (HPAGE_SIZE / PAGE_SIZE) * mapping->nrpages;
		} else {
			struct shmem_inode_info *info = SHMEM_I(inode);
			spin_lock(&info->lock);
			*rss += inode->i_mapping->nrpages;
			*swp += info->swapped;
			spin_unlock(&info->lock);
		}
	}
}

asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
{
	struct shm_setbuf setbuf;
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;
		/* reading an integer is always atomic */
		err = shm_ids(ns).max_id;
		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info, 0, sizeof(shm_info));
		mutex_lock(&shm_ids(ns).mutex);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = shm_ids(ns).max_id;
		mutex_unlock(&shm_ids(ns).mutex);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		memset(&tbuf, 0, sizeof(tbuf));
		shp = shm_lock(ns, shmid);
		if (shp == NULL) {
			err = -EINVAL;
			goto out;
		} else if (cmd == SHM_STAT) {
			err = -EINVAL;
			if (shmid > shm_ids(ns).max_id)
				goto out_unlock;
			result = shm_buildid(ns, shmid, shp->shm_perm.seq);
		} else {
			err = shm_checkid(ns, shp, shmid);
			if (err)
				goto out_unlock;
			result = 0;
		}
		err = -EACCES;
		if (ipcperms(&shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		shm_unlock(shp);
		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		shp = shm_lock(ns, shmid);
		if (shp == NULL) {
			err = -EINVAL;
			goto out;
		}
		err = shm_checkid(ns, shp, shmid);
		if (err)
			goto out_unlock;

		err = audit_ipc_obj(&(shp->shm_perm));
		if (err)
			goto out_unlock;

		if (!capable(CAP_IPC_LOCK)) {
			err = -EPERM;
			if (current->euid != shp->shm_perm.uid &&
			    current->euid != shp->shm_perm.cuid)
				goto out_unlock;
			if (cmd == SHM_LOCK &&
			    !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current->user;
			if (!is_file_hugepages(shp->shm_file)) {
				err = shmem_lock(shp->shm_file, 1, user);
				if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
					shp->shm_perm.mode |= SHM_LOCKED;
					shp->mlock_user = user;
				}
			}
		} else if (!is_file_hugepages(shp->shm_file)) {
			shmem_lock(shp->shm_file, 0, shp->mlock_user);
			shp->shm_perm.mode &= ~SHM_LOCKED;
			shp->mlock_user = NULL;
		}
		shm_unlock(shp);
		goto out;
	}
	case IPC_RMID:
	{
		/*
		 * We cannot simply remove the file. The SVID states
		 * that the block remains until the last person
		 * detaches from it, then is deleted. A shmat() on
		 * an RMID segment is legal in older Linux and if
		 * we change it apps break...
		 *
		 * Instead we set a destroyed flag, and then blow
		 * the name away when the usage hits zero.
		 */
		mutex_lock(&shm_ids(ns).mutex);
		shp = shm_lock(ns, shmid);
		err = -EINVAL;
		if (shp == NULL)
			goto out_up;
		err = shm_checkid(ns, shp, shmid);
		if (err)
			goto out_unlock_up;

		err = audit_ipc_obj(&(shp->shm_perm));
		if (err)
			goto out_unlock_up;

		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			err = -EPERM;
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		do_shm_rmid(ns, shp);
		mutex_unlock(&shm_ids(ns).mutex);
		goto out;
	}
	case IPC_SET:
	{
		if (copy_shmid_from_user(&setbuf, buf, version)) {
			err = -EFAULT;
			goto out;
		}
		mutex_lock(&shm_ids(ns).mutex);
		shp = shm_lock(ns, shmid);
		err = -EINVAL;
		if (shp == NULL)
			goto out_up;
		err = shm_checkid(ns, shp, shmid);
		if (err)
			goto out_unlock_up;
		err = audit_ipc_obj(&(shp->shm_perm));
		if (err)
			goto out_unlock_up;
		err = audit_ipc_set_perm(0, setbuf.uid, setbuf.gid, setbuf.mode);
		if (err)
			goto out_unlock_up;
		err = -EPERM;
		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		shp->shm_perm.uid = setbuf.uid;
		shp->shm_perm.gid = setbuf.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~S_IRWXUGO)
			| (setbuf.mode & S_IRWXUGO);
		shp->shm_ctim = get_seconds();
		break;
	}

	default:
		err = -EINVAL;
		goto out;
	}

	err = 0;
out_unlock_up:
	shm_unlock(shp);
out_up:
	mutex_unlock(&shm_ids(ns).mutex);
	goto out;
out_unlock:
	shm_unlock(shp);
out:
	return err;
}

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file *file;
	int err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	unsigned long user_addr;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	mode_t f_mode;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA - 1)) {
			if (shmflg & SHM_RND)
				addr &= ~(SHMLBA - 1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	shp = shm_lock(ns, shmid);
	if (shp == NULL)
		goto out;

	err = shm_checkid(ns, shp, shmid);
	if (err)
		goto out_unlock;

	err = -EACCES;
	if (ipcperms(&shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	path.dentry = dget(shp->shm_file->f_path.dentry);
	path.mnt    = mntget(shp->shm_file->f_path.mnt);
	shp->shm_nattch++;
	size = i_size_read(path.dentry->d_inode);
	shm_unlock(shp);

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd)
		goto out_put_path;

	err = -ENOMEM;
	file = get_empty_filp();
	if (!file)
		goto out_free;

	file->f_op = &shm_file_operations;
	file->private_data = sfd;
	file->f_path = path;
	file->f_mapping = shp->shm_file->f_mapping;
	file->f_mode = f_mode;
	sfd->id = shp->id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = do_mmap(file, addr, size, prot, flags, 0);
	*raddr = user_addr;
	err = 0;
	if (IS_ERR_VALUE(user_addr))
		err = (long)user_addr;
invalid:
	up_write(&current->mm->mmap_sem);

	fput(file);

out_nattch:
	mutex_lock(&shm_ids(ns).mutex);
	shp = shm_lock(ns, shmid);
	BUG_ON(!shp);
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	mutex_unlock(&shm_ids(ns).mutex);

out:
	return err;

out_unlock:
	shm_unlock(shp);
	goto out;

out_free:
	kfree(sfd);
out_put_path:
	dput(path.dentry);
	mntput(path.mnt);
	goto out_nattch;
}

asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}

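/*
 * For reference, a minimal (illustrative, non-kernel) userspace sketch of the
 * call sequence served by sys_shmget/sys_shmat/sys_shmdt/sys_shmctl above.
 * Error handling is omitted, and the size and mode bits are arbitrary example
 * values:
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *	#include <string.h>
 *
 *	int shm_example(void)
 *	{
 *		int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *		char *p = shmat(id, NULL, 0);	// attach at a kernel-chosen address
 *
 *		strcpy(p, "hello");		// backed by the shmem file set up in newseg()
 *		shmdt(p);			// detach; the segment persists until removed
 *		shmctl(id, IPC_RMID, NULL);	// destroyed once the last attach is gone
 *		return id;
 *	}
 */
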
/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
asmlinkage long sys_shmdt(char __user *shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *next;
	unsigned long addr = (unsigned long)shmaddr;
	loff_t size = 0;
	int retval = -EINVAL;

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or
		 * otherwise it starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
		    (vma->vm_start - addr) / PAGE_SIZE == vma->vm_pgoff) {

			size = vma->vm_file->f_path.dentry->d_inode->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
		    (vma->vm_start - addr) / PAGE_SIZE == vma->vm_pgoff)

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

	up_write(&mm->mmap_sem);
	return retval;
}

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct shmid_kernel *shp = it;
	char *format;

#define SMALL_STRING "%10d %10d %4o %10u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
#define BIG_STRING   "%10d %10d %4o %21u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"

	if (sizeof(size_t) <= sizeof(int))
		format = SMALL_STRING;
	else
		format = BIG_STRING;
	return seq_printf(s, format,
			  shp->shm_perm.key,
			  shp->id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  shp->shm_nattch,
			  shp->shm_perm.uid,
			  shp->shm_perm.gid,
			  shp->shm_perm.cuid,
			  shp->shm_perm.cgid,
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim);
}
#endif