/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>

#include <asm/uaccess.h>

#include "util.h"

struct shm_file_data {
        int id;
        struct ipc_namespace *ns;
        struct file *file;
        const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static struct vm_operations_struct shm_vm_ops;

static struct ipc_ids init_shm_ids;

#define shm_ids(ns)     (*((ns)->ids[IPC_SHM_IDS]))

#define shm_unlock(shp)                 \
        ipc_unlock(&(shp)->shm_perm)
#define shm_buildid(id, seq)    ipc_buildid(id, seq)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

static void __shm_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids)
{
        ns->ids[IPC_SHM_IDS] = ids;
        ns->shm_ctlmax = SHMMAX;
        ns->shm_ctlall = SHMALL;
        ns->shm_ctlmni = SHMMNI;
        ns->shm_tot = 0;
        ipc_init_ids(ids);
}

/*
 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
 * Only shm_ids.rw_mutex remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
        if (shp->shm_nattch) {
                shp->shm_perm.mode |= SHM_DEST;
                /* Do not find it any more */
                shp->shm_perm.key = IPC_PRIVATE;
                shm_unlock(shp);
        } else
                shm_destroy(ns, shp);
}

int shm_init_ns(struct ipc_namespace *ns)
{
        struct ipc_ids *ids;

        ids = kmalloc(sizeof(struct ipc_ids), GFP_KERNEL);
        if (ids == NULL)
                return -ENOMEM;

        __shm_init_ns(ns, ids);
        return 0;
}

void shm_exit_ns(struct ipc_namespace *ns)
{
        struct shmid_kernel *shp;
        int next_id;
        int total, in_use;

        down_write(&shm_ids(ns).rw_mutex);

        in_use = shm_ids(ns).in_use;

        for (total = 0, next_id = 0; total < in_use; next_id++) {
                shp = idr_find(&shm_ids(ns).ipcs_idr, next_id);
                if (shp == NULL)
                        continue;
                ipc_lock_by_ptr(&shp->shm_perm);
                do_shm_rmid(ns, shp);
                total++;
        }
        up_write(&shm_ids(ns).rw_mutex);

        kfree(ns->ids[IPC_SHM_IDS]);
        ns->ids[IPC_SHM_IDS] = NULL;
}

void __init shm_init(void)
{
        __shm_init_ns(&init_ipc_ns, &init_shm_ids);
        ipc_init_proc_interface("sysvipc/shm",
                                "       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime\n",
                                IPC_SHM_IDS, sysvipc_shm_proc_show);
}

/*
 * shm_lock_(check_)down routines are called in the paths where the rw_mutex
 * is held to protect access to the idr tree.
 */
static inline struct shmid_kernel *shm_lock_down(struct ipc_namespace *ns,
                                                int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock_down(&shm_ids(ns), id);

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_lock_check_down(
                                                struct ipc_namespace *ns,
                                                int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock_check_down(&shm_ids(ns), id);

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rw_mutex
 * is not held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
                                                int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
        ipc_rmid(&shm_ids(ns), &s->shm_perm);
}

static inline int shm_addid(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
        return ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;

        shp = shm_lock(sfd->ns, sfd->id);
        BUG_ON(IS_ERR(shp));
        shp->shm_atim = get_seconds();
        shp->shm_lprid = task_tgid_vnr(current);
        shp->shm_nattch++;
        shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
        ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
        shm_rmid(ns, shp);
        shm_unlock(shp);
        if (!is_file_hugepages(shp->shm_file))
                shmem_lock(shp->shm_file, 0, shp->mlock_user);
        else
                user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
                                                shp->mlock_user);
        fput(shp->shm_file);
        security_shm_free(shp);
        ipc_rcu_putref(shp);
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;
        struct ipc_namespace *ns = sfd->ns;

        down_write(&shm_ids(ns).rw_mutex);
        /* remove from the list of attaches of the shm segment */
        shp = shm_lock_down(ns, sfd->id);
        BUG_ON(IS_ERR(shp));
        shp->shm_lprid = task_tgid_vnr(current);
        shp->shm_dtim = get_seconds();
        shp->shm_nattch--;
        if (shp->shm_nattch == 0 &&
            shp->shm_perm.mode & SHM_DEST)
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
        up_write(&shm_ids(ns).rw_mutex);
}
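
/*
 * Forward a page fault on the attached mapping to the fault handler of
 * the backing file (shmem or hugetlbfs) recorded at mmap time.
 */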
static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);

        return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        int err = 0;

        if (sfd->vm_ops->set_policy)
                err = sfd->vm_ops->set_policy(vma, new);
        return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
                                        unsigned long addr)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct mempolicy *pol = NULL;

        if (sfd->vm_ops->get_policy)
                pol = sfd->vm_ops->get_policy(vma, addr);
        else if (vma->vm_policy)
                pol = vma->vm_policy;
        else
                pol = current->mempolicy;
        return pol;
}
#endif
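
/*
 * mmap the underlying shmem/hugetlbfs file, remember its vm_ops in the
 * per-attach shm_file_data, then interpose shm_vm_ops so open/close/fault
 * events keep the segment's attach count and timestamps up to date.
 */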
static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct shm_file_data *sfd = shm_file_data(file);
        int ret;

        ret = sfd->file->f_op->mmap(sfd->file, vma);
        if (ret != 0)
                return ret;
        sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
        BUG_ON(!sfd->vm_ops->fault);
#endif
        vma->vm_ops = &shm_vm_ops;
        shm_open(vma);
        return ret;
}
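
/* Drop the per-attach shm_file_data once the last reference to the file goes away. */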
static int shm_release(struct inode *ino, struct file *file)
{
        struct shm_file_data *sfd = shm_file_data(file);

        put_ipc_ns(sfd->ns);
        shm_file_data(file) = NULL;
        kfree(sfd);
        return 0;
}
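
/* Pass fsync through to the backing file, if it implements one. */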
static int shm_fsync(struct file *file, struct dentry *dentry, int datasync)
{
        int (*fsync) (struct file *, struct dentry *, int datasync);
        struct shm_file_data *sfd = shm_file_data(file);
        int ret = -EINVAL;

        fsync = sfd->file->f_op->fsync;
        if (fsync)
                ret = fsync(sfd->file, sfd->file->f_path.dentry, datasync);
        return ret;
}
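
/*
 * Delegate placement of the mapping to the generic helper, passing the
 * backing file so that its constraints (e.g. hugetlbfs alignment) apply.
 */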
static unsigned long shm_get_unmapped_area(struct file *file,
        unsigned long addr, unsigned long len, unsigned long pgoff,
        unsigned long flags)
{
        struct shm_file_data *sfd = shm_file_data(file);

        return get_unmapped_area(sfd->file, addr, len, pgoff, flags);
}
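
/*
 * Report whether a file created by shmat() is backed by hugetlbfs, by
 * looking through the shm wrapper at the underlying file.
 */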
int is_file_shm_hugepages(struct file *file)
{
        int ret = 0;

        if (file->f_op == &shm_file_operations) {
                struct shm_file_data *sfd;
                sfd = shm_file_data(file);
                ret = is_file_hugepages(sfd->file);
        }
        return ret;
}

static const struct file_operations shm_file_operations = {
        .mmap           = shm_mmap,
        .fsync          = shm_fsync,
        .release        = shm_release,
        .get_unmapped_area = shm_get_unmapped_area,
};

static struct vm_operations_struct shm_vm_ops = {
        .open   = shm_open,     /* callback for a new vm-area open */
        .close  = shm_close,    /* callback for when the vm-area is released */
        .fault  = shm_fault,
#if defined(CONFIG_NUMA)
        .set_policy = shm_set_policy,
        .get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rw_mutex held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
        key_t key = params->key;
        int shmflg = params->flg;
        size_t size = params->u.size;
        int error;
        struct shmid_kernel *shp;
        int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        struct file *file;
        char name[13];
        int id;

        if (size < SHMMIN || size > ns->shm_ctlmax)
                return -EINVAL;

        if (ns->shm_tot + numpages > ns->shm_ctlall)
                return -ENOSPC;

        shp = ipc_rcu_alloc(sizeof(*shp));
        if (!shp)
                return -ENOMEM;

        shp->shm_perm.key = key;
        shp->shm_perm.mode = (shmflg & S_IRWXUGO);
        shp->mlock_user = NULL;

        shp->shm_perm.security = NULL;
        error = security_shm_alloc(shp);
        if (error) {
                ipc_rcu_putref(shp);
                return error;
        }

        sprintf(name, "SYSV%08x", key);
        if (shmflg & SHM_HUGETLB) {
                /* hugetlb_file_setup takes care of mlock user accounting */
                file = hugetlb_file_setup(name, size);
                shp->mlock_user = current->user;
        } else {
                int acctflag = VM_ACCOUNT;
                /*
                 * Do not allow no accounting for OVERCOMMIT_NEVER, even
                 * if it's asked for.
                 */
                if ((shmflg & SHM_NORESERVE) &&
                    sysctl_overcommit_memory != OVERCOMMIT_NEVER)
                        acctflag = 0;
                file = shmem_file_setup(name, size, acctflag);
        }
        error = PTR_ERR(file);
        if (IS_ERR(file))
                goto no_file;

        id = shm_addid(ns, shp);
        if (id < 0) {
                error = id;
                goto no_id;
        }

        shp->shm_cprid = task_tgid_vnr(current);
        shp->shm_lprid = 0;
        shp->shm_atim = shp->shm_dtim = 0;
        shp->shm_ctim = get_seconds();
        shp->shm_segsz = size;
        shp->shm_nattch = 0;
        shp->shm_perm.id = shm_buildid(id, shp->shm_perm.seq);
        shp->shm_file = file;
        /*
         * shmid gets reported as "inode#" in /proc/pid/maps.
         * proc-ps tools use this. Changing this will break them.
         */
        file->f_dentry->d_inode->i_ino = shp->shm_perm.id;

        ns->shm_tot += numpages;
        error = shp->shm_perm.id;
        shm_unlock(shp);
        return error;

no_id:
        fput(file);
no_file:
        security_shm_free(shp);
        ipc_rcu_putref(shp);
        return error;
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
                                struct ipc_params *params)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        if (shp->shm_segsz < params->u.size)
                return -EINVAL;

        return 0;
}
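
/*
 * shmget(2): fill in the ipc_ops and ipc_params and let the generic
 * ipcget() helper decide between creating a new segment via newseg()
 * and looking up an existing one for this key.
 */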
asmlinkage long sys_shmget(key_t key, size_t size, int shmflg)
{
        struct ipc_namespace *ns;
        struct ipc_ops shm_ops;
        struct ipc_params shm_params;

        ns = current->nsproxy->ipc_ns;

        shm_ops.getnew = newseg;
        shm_ops.associate = shm_security;
        shm_ops.more_checks = shm_more_checks;

        shm_params.key = key;
        shm_params.flg = shmflg;
        shm_params.u.size = size;

        return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}
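
/*
 * Copy a shmid64_ds out to user space, converting to the old shmid_ds
 * layout when the caller used the IPC_OLD interface.
 */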
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
        {
                struct shmid_ds out;

                ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
                out.shm_segsz = in->shm_segsz;
                out.shm_atime = in->shm_atime;
                out.shm_dtime = in->shm_dtime;
                out.shm_ctime = in->shm_ctime;
                out.shm_cpid = in->shm_cpid;
                out.shm_lpid = in->shm_lpid;
                out.shm_nattch = in->shm_nattch;

                return copy_to_user(buf, &out, sizeof(out));
        }
        default:
                return -EINVAL;
        }
}

struct shm_setbuf {
        uid_t   uid;
        gid_t   gid;
        mode_t  mode;
};
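
/*
 * Pull the owner, group and mode for IPC_SET in from user space,
 * accepting either the new shmid64_ds or the old shmid_ds layout.
 */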
static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __user *buf, int version)
{
        switch (version) {
        case IPC_64:
        {
                struct shmid64_ds tbuf;

                if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
                        return -EFAULT;

                out->uid = tbuf.shm_perm.uid;
                out->gid = tbuf.shm_perm.gid;
                out->mode = tbuf.shm_perm.mode;

                return 0;
        }
        case IPC_OLD:
        {
                struct shmid_ds tbuf_old;

                if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
                        return -EFAULT;

                out->uid = tbuf_old.shm_perm.uid;
                out->gid = tbuf_old.shm_perm.gid;
                out->mode = tbuf_old.shm_perm.mode;

                return 0;
        }
        default:
                return -EINVAL;
        }
}
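
/*
 * Copy the shminfo limits out to user space, clamping shmmax to INT_MAX
 * when the caller only understands the old shminfo layout.
 */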
static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
        {
                struct shminfo out;

                if (in->shmmax > INT_MAX)
                        out.shmmax = INT_MAX;
                else
                        out.shmmax = (int)in->shmmax;

                out.shmmin = in->shmmin;
                out.shmmni = in->shmmni;
                out.shmseg = in->shmseg;
                out.shmall = in->shmall;

                return copy_to_user(buf, &out, sizeof(out));
        }
        default:
                return -EINVAL;
        }
}

/*
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
                unsigned long *swp)
{
        int next_id;
        int total, in_use;

        *rss = 0;
        *swp = 0;

        in_use = shm_ids(ns).in_use;

        for (total = 0, next_id = 0; total < in_use; next_id++) {
                struct shmid_kernel *shp;
                struct inode *inode;

                shp = idr_find(&shm_ids(ns).ipcs_idr, next_id);
                if (shp == NULL)
                        continue;

                inode = shp->shm_file->f_path.dentry->d_inode;

                if (is_file_hugepages(shp->shm_file)) {
                        struct address_space *mapping = inode->i_mapping;
                        *rss += (HPAGE_SIZE/PAGE_SIZE)*mapping->nrpages;
                } else {
                        struct shmem_inode_info *info = SHMEM_I(inode);
                        spin_lock(&info->lock);
                        *rss += inode->i_mapping->nrpages;
                        *swp += info->swapped;
                        spin_unlock(&info->lock);
                }

                total++;
        }
}
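
/*
 * shmctl(2): segment control operations. The read-only queries take
 * rw_mutex as a reader (or only the per-segment lock), while IPC_RMID
 * and IPC_SET, which modify the segment, take it as a writer.
 */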
asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
{
        struct shm_setbuf setbuf;
        struct shmid_kernel *shp;
        int err, version;
        struct ipc_namespace *ns;

        if (cmd < 0 || shmid < 0) {
                err = -EINVAL;
                goto out;
        }

        version = ipc_parse_version(&cmd);
        ns = current->nsproxy->ipc_ns;

        switch (cmd) { /* replace with proc interface ? */
        case IPC_INFO:
        {
                struct shminfo64 shminfo;

                err = security_shm_shmctl(NULL, cmd);
                if (err)
                        return err;

                memset(&shminfo, 0, sizeof(shminfo));
                shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
                shminfo.shmmax = ns->shm_ctlmax;
                shminfo.shmall = ns->shm_ctlall;

                shminfo.shmmin = SHMMIN;
                if (copy_shminfo_to_user(buf, &shminfo, version))
                        return -EFAULT;

                down_read(&shm_ids(ns).rw_mutex);
                err = ipc_get_maxid(&shm_ids(ns));
                up_read(&shm_ids(ns).rw_mutex);

                if (err < 0)
                        err = 0;
                goto out;
        }
        case SHM_INFO:
        {
                struct shm_info shm_info;

                err = security_shm_shmctl(NULL, cmd);
                if (err)
                        return err;

                memset(&shm_info, 0, sizeof(shm_info));
                down_read(&shm_ids(ns).rw_mutex);
                shm_info.used_ids = shm_ids(ns).in_use;
                shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
                shm_info.shm_tot = ns->shm_tot;
                shm_info.swap_attempts = 0;
                shm_info.swap_successes = 0;
                err = ipc_get_maxid(&shm_ids(ns));
                up_read(&shm_ids(ns).rw_mutex);
                if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
                        err = -EFAULT;
                        goto out;
                }

                err = err < 0 ? 0 : err;
                goto out;
        }
        case SHM_STAT:
        case IPC_STAT:
        {
                struct shmid64_ds tbuf;
                int result;

                if (!buf) {
                        err = -EFAULT;
                        goto out;
                }

                if (cmd == SHM_STAT) {
                        shp = shm_lock(ns, shmid);
                        if (IS_ERR(shp)) {
                                err = PTR_ERR(shp);
                                goto out;
                        }
                        result = shp->shm_perm.id;
                } else {
                        shp = shm_lock_check(ns, shmid);
                        if (IS_ERR(shp)) {
                                err = PTR_ERR(shp);
                                goto out;
                        }
                        result = 0;
                }
                err = -EACCES;
                if (ipcperms(&shp->shm_perm, S_IRUGO))
                        goto out_unlock;
                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock;
                memset(&tbuf, 0, sizeof(tbuf));
                kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
                tbuf.shm_segsz = shp->shm_segsz;
                tbuf.shm_atime = shp->shm_atim;
                tbuf.shm_dtime = shp->shm_dtim;
                tbuf.shm_ctime = shp->shm_ctim;
                tbuf.shm_cpid = shp->shm_cprid;
                tbuf.shm_lpid = shp->shm_lprid;
                tbuf.shm_nattch = shp->shm_nattch;
                shm_unlock(shp);
                if (copy_shmid_to_user(buf, &tbuf, version))
                        err = -EFAULT;
                else
                        err = result;
                goto out;
        }
        case SHM_LOCK:
        case SHM_UNLOCK:
        {
                shp = shm_lock_check(ns, shmid);
                if (IS_ERR(shp)) {
                        err = PTR_ERR(shp);
                        goto out;
                }

                err = audit_ipc_obj(&(shp->shm_perm));
                if (err)
                        goto out_unlock;

                if (!capable(CAP_IPC_LOCK)) {
                        err = -EPERM;
                        if (current->euid != shp->shm_perm.uid &&
                            current->euid != shp->shm_perm.cuid)
                                goto out_unlock;
                        if (cmd == SHM_LOCK &&
                            !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
                                goto out_unlock;
                }

                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock;

                if (cmd == SHM_LOCK) {
                        struct user_struct *user = current->user;
                        if (!is_file_hugepages(shp->shm_file)) {
                                err = shmem_lock(shp->shm_file, 1, user);
                                if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
                                        shp->shm_perm.mode |= SHM_LOCKED;
                                        shp->mlock_user = user;
                                }
                        }
                } else if (!is_file_hugepages(shp->shm_file)) {
                        shmem_lock(shp->shm_file, 0, shp->mlock_user);
                        shp->shm_perm.mode &= ~SHM_LOCKED;
                        shp->mlock_user = NULL;
                }
                shm_unlock(shp);
                goto out;
        }
        case IPC_RMID:
        {
                /*
                 * We cannot simply remove the file. The SVID states
                 * that the block remains until the last person
                 * detaches from it, then is deleted. A shmat() on
                 * an RMID segment is legal in older Linux and if
                 * we change it apps break...
                 *
                 * Instead we set a destroyed flag, and then blow
                 * the name away when the usage hits zero.
                 */
                down_write(&shm_ids(ns).rw_mutex);
                shp = shm_lock_check_down(ns, shmid);
                if (IS_ERR(shp)) {
                        err = PTR_ERR(shp);
                        goto out_up;
                }

                err = audit_ipc_obj(&(shp->shm_perm));
                if (err)
                        goto out_unlock_up;

                if (current->euid != shp->shm_perm.uid &&
                    current->euid != shp->shm_perm.cuid &&
                    !capable(CAP_SYS_ADMIN)) {
                        err = -EPERM;
                        goto out_unlock_up;
                }

                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock_up;

                do_shm_rmid(ns, shp);
                up_write(&shm_ids(ns).rw_mutex);
                goto out;
        }
        case IPC_SET:
        {
                if (!buf) {
                        err = -EFAULT;
                        goto out;
                }

                if (copy_shmid_from_user(&setbuf, buf, version)) {
                        err = -EFAULT;
                        goto out;
                }
                down_write(&shm_ids(ns).rw_mutex);
                shp = shm_lock_check_down(ns, shmid);
                if (IS_ERR(shp)) {
                        err = PTR_ERR(shp);
                        goto out_up;
                }
                err = audit_ipc_obj(&(shp->shm_perm));
                if (err)
                        goto out_unlock_up;
                err = audit_ipc_set_perm(0, setbuf.uid, setbuf.gid, setbuf.mode);
                if (err)
                        goto out_unlock_up;
                err = -EPERM;
                if (current->euid != shp->shm_perm.uid &&
                    current->euid != shp->shm_perm.cuid &&
                    !capable(CAP_SYS_ADMIN)) {
                        goto out_unlock_up;
                }

                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock_up;

                shp->shm_perm.uid = setbuf.uid;
                shp->shm_perm.gid = setbuf.gid;
                shp->shm_perm.mode = (shp->shm_perm.mode & ~S_IRWXUGO)
                        | (setbuf.mode & S_IRWXUGO);
                shp->shm_ctim = get_seconds();
                break;
        }

        default:
                err = -EINVAL;
                goto out;
        }

        err = 0;
out_unlock_up:
        shm_unlock(shp);
out_up:
        up_write(&shm_ids(ns).rw_mutex);
        goto out;
out_unlock:
        shm_unlock(shp);
out:
        return err;
}

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
        struct shmid_kernel *shp;
        unsigned long addr;
        unsigned long size;
        struct file *file;
        int err;
        unsigned long flags;
        unsigned long prot;
        int acc_mode;
        unsigned long user_addr;
        struct ipc_namespace *ns;
        struct shm_file_data *sfd;
        struct path path;
        mode_t f_mode;

        err = -EINVAL;
        if (shmid < 0)
                goto out;
        else if ((addr = (ulong)shmaddr)) {
                if (addr & (SHMLBA-1)) {
                        if (shmflg & SHM_RND)
                                addr &= ~(SHMLBA-1);    /* round down */
                        else
#ifndef __ARCH_FORCE_SHMLBA
                                if (addr & ~PAGE_MASK)
#endif
                                        goto out;
                }
                flags = MAP_SHARED | MAP_FIXED;
        } else {
                if ((shmflg & SHM_REMAP))
                        goto out;

                flags = MAP_SHARED;
        }

        if (shmflg & SHM_RDONLY) {
                prot = PROT_READ;
                acc_mode = S_IRUGO;
                f_mode = FMODE_READ;
        } else {
                prot = PROT_READ | PROT_WRITE;
                acc_mode = S_IRUGO | S_IWUGO;
                f_mode = FMODE_READ | FMODE_WRITE;
        }
        if (shmflg & SHM_EXEC) {
                prot |= PROT_EXEC;
                acc_mode |= S_IXUGO;
        }

        /*
         * We cannot rely on the fs check since SYSV IPC does have an
         * additional creator id...
         */
        ns = current->nsproxy->ipc_ns;
        shp = shm_lock_check(ns, shmid);
        if (IS_ERR(shp)) {
                err = PTR_ERR(shp);
                goto out;
        }

        err = -EACCES;
        if (ipcperms(&shp->shm_perm, acc_mode))
                goto out_unlock;

        err = security_shm_shmat(shp, shmaddr, shmflg);
        if (err)
                goto out_unlock;

        path.dentry = dget(shp->shm_file->f_path.dentry);
        path.mnt = shp->shm_file->f_path.mnt;
        shp->shm_nattch++;
        size = i_size_read(path.dentry->d_inode);
        shm_unlock(shp);

        err = -ENOMEM;
        sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
        if (!sfd)
                goto out_put_dentry;

        err = -ENOMEM;

        file = alloc_file(path.mnt, path.dentry, f_mode, &shm_file_operations);
        if (!file)
                goto out_free;

        file->private_data = sfd;
        file->f_mapping = shp->shm_file->f_mapping;
        sfd->id = shp->shm_perm.id;
        sfd->ns = get_ipc_ns(ns);
        sfd->file = shp->shm_file;
        sfd->vm_ops = NULL;

        down_write(&current->mm->mmap_sem);
        if (addr && !(shmflg & SHM_REMAP)) {
                err = -EINVAL;
                if (find_vma_intersection(current->mm, addr, addr + size))
                        goto invalid;
                /*
                 * If shm segment goes below stack, make sure there is some
                 * space left for the stack to grow (at least 4 pages).
                 */
                if (addr < current->mm->start_stack &&
                    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
                        goto invalid;
        }

        user_addr = do_mmap(file, addr, size, prot, flags, 0);
        *raddr = user_addr;
        err = 0;
        if (IS_ERR_VALUE(user_addr))
                err = (long)user_addr;
invalid:
        up_write(&current->mm->mmap_sem);

        fput(file);

out_nattch:
        down_write(&shm_ids(ns).rw_mutex);
        shp = shm_lock_down(ns, shmid);
        BUG_ON(IS_ERR(shp));
        shp->shm_nattch--;
        if (shp->shm_nattch == 0 &&
            shp->shm_perm.mode & SHM_DEST)
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
        up_write(&shm_ids(ns).rw_mutex);

out:
        return err;

out_unlock:
        shm_unlock(shp);
        goto out;

out_free:
        kfree(sfd);
out_put_dentry:
        dput(path.dentry);
        goto out_nattch;
}
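
/*
 * shmat(2): thin wrapper around do_shmat() that returns the attach
 * address directly as the syscall return value.
 */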
asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg)
{
        unsigned long ret;
        long err;

        err = do_shmat(shmid, shmaddr, shmflg, &ret);
        if (err)
                return err;
        force_successful_syscall_return();
        return (long)ret;
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
asmlinkage long sys_shmdt(char __user *shmaddr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *next;
        unsigned long addr = (unsigned long)shmaddr;
        loff_t size = 0;
        int retval = -EINVAL;

        if (addr & ~PAGE_MASK)
                return retval;

        down_write(&mm->mmap_sem);

        /*
         * This function tries to be smart and unmap shm segments that
         * were modified by partial mlock or munmap calls:
         * - It first determines the size of the shm segment that should be
         *   unmapped: It searches for a vma that is backed by shm and that
         *   started at address shmaddr. It records its size and then unmaps
         *   it.
         * - Then it unmaps all shm vmas that started at shmaddr and that
         *   are within the initially determined size.
         * Errors from do_munmap are ignored: the function only fails if
         * it's called with invalid parameters or if it's called to unmap
         * a part of a vma. Both calls in this function are for full vmas,
         * the parameters are directly copied from the vma itself and always
         * valid - therefore do_munmap cannot fail. (famous last words?)
         */
        /*
         * If it had been mremap()'d, the starting address would not
         * match the usual checks anyway. So assume all vma's are
         * above the starting address given.
         */
        vma = find_vma(mm, addr);

        while (vma) {
                next = vma->vm_next;

                /*
                 * Check if the starting address would match, i.e. it's
                 * a fragment created by mprotect() and/or munmap(), or
                 * otherwise it starts at this address with no hassles.
                 */
                if ((vma->vm_ops == &shm_vm_ops) &&
                    (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

                        size = vma->vm_file->f_path.dentry->d_inode->i_size;
                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                        /*
                         * We discovered the size of the shm segment, so
                         * break out of here and fall through to the next
                         * loop that uses the size information to stop
                         * searching for matching vma's.
                         */
                        retval = 0;
                        vma = next;
                        break;
                }
                vma = next;
        }

        /*
         * We need look no further than the maximum address a fragment
         * could possibly have landed at. Also cast things to loff_t to
         * prevent overflows and make comparisons vs. equal-width types.
         */
        size = PAGE_ALIGN(size);
        while (vma && (loff_t)(vma->vm_end - addr) <= size) {
                next = vma->vm_next;

                /* finding a matching vma now does not alter retval */
                if ((vma->vm_ops == &shm_vm_ops) &&
                    (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)
                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                vma = next;
        }

        up_write(&mm->mmap_sem);
        return retval;
}

#ifdef CONFIG_PROC_FS
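
/*
 * Emit one line of /proc/sysvipc/shm per segment; the wider format is
 * used when size_t is larger than int so shm_segsz still fits its column.
 */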
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
        struct shmid_kernel *shp = it;
        char *format;

#define SMALL_STRING "%10d %10d %4o %10u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
#define BIG_STRING   "%10d %10d %4o %21u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"

        if (sizeof(size_t) <= sizeof(int))
                format = SMALL_STRING;
        else
                format = BIG_STRING;
        return seq_printf(s, format,
                          shp->shm_perm.key,
                          shp->shm_perm.id,
                          shp->shm_perm.mode,
                          shp->shm_segsz,
                          shp->shm_cprid,
                          shp->shm_lprid,
                          shp->shm_nattch,
                          shp->shm_perm.uid,
                          shp->shm_perm.gid,
                          shp->shm_perm.cuid,
                          shp->shm_perm.cgid,
                          shp->shm_atim,
                          shp->shm_dtim,
                          shp->shm_ctim);
}
#endif