/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>

#include "util.h"
static struct file_operations shm_file_operations;
static struct vm_operations_struct shm_vm_ops;

static struct ipc_ids shm_ids;

#define shm_lock(id)	((struct shmid_kernel *)ipc_lock(&shm_ids, id))
#define shm_unlock(shp)	ipc_unlock(&(shp)->shm_perm)
#define shm_get(id)	((struct shmid_kernel *)ipc_get(&shm_ids, id))
#define shm_buildid(id, seq) \
	ipc_buildid(&shm_ids, id, seq)

static int newseg (key_t key, int shmflg, size_t size);
static void shm_open (struct vm_area_struct *shmd);
static void shm_close (struct vm_area_struct *shmd);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif
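
/*
 * Limits below are runtime-tunable via sysctl
 * (kernel.shmmax, kernel.shmall, kernel.shmmni).
 */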
size_t	shm_ctlmax = SHMMAX;
size_t	shm_ctlall = SHMALL;
int	shm_ctlmni = SHMMNI;

static int shm_tot; /* total number of shared memory pages */

void __init shm_init (void)
{
	ipc_init_ids(&shm_ids, 1);
	ipc_init_proc_interface("sysvipc/shm",
				" key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime\n",
				&shm_ids,
				sysvipc_shm_proc_show);
}

static inline int shm_checkid(struct shmid_kernel *s, int id)
{
	if (ipc_checkid(&shm_ids, &s->shm_perm, id))
		return -EIDRM;
	return 0;
}

static inline struct shmid_kernel *shm_rmid(int id)
{
	return (struct shmid_kernel *)ipc_rmid(&shm_ids, id);
}

static inline int shm_addid(struct shmid_kernel *shp)
{
	return ipc_addid(&shm_ids, &shp->shm_perm, shm_ctlmni);
}

static inline void shm_inc (int id) {
	struct shmid_kernel *shp;

	shp = shm_lock(id);
	BUG_ON(!shp);
	shp->shm_atim = get_seconds();
	shp->shm_lprid = current->tgid;
	shp->shm_nattch++;
	shm_unlock(shp);
}

/* This is called by fork, once for every shm attach. */
static void shm_open (struct vm_area_struct *shmd)
{
	shm_inc (shmd->vm_file->f_dentry->d_inode->i_ino);
}
/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.mutex locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy (struct shmid_kernel *shp)
{
	shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid (shp->id);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else
		user_shm_unlock(shp->shm_file->f_dentry->d_inode->i_size,
				shp->mlock_user);
	fput (shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}

/*
 * remove the attach descriptor shmd.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close (struct vm_area_struct *shmd)
{
	struct file *file = shmd->vm_file;
	int id = file->f_dentry->d_inode->i_ino;
	struct shmid_kernel *shp;

	mutex_lock(&shm_ids.mutex);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(id);
	BUG_ON(!shp);
	shp->shm_lprid = current->tgid;
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy (shp);
	else
		shm_unlock(shp);
	mutex_unlock(&shm_ids.mutex);
}
static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	int ret;

	ret = shmem_mmap(file, vma);
	if (ret == 0) {
		vma->vm_ops = &shm_vm_ops;
		if (!(vma->vm_flags & VM_WRITE))
			vma->vm_flags &= ~VM_MAYWRITE;
		shm_inc(file->f_dentry->d_inode->i_ino);
	}

	return ret;
}

static struct file_operations shm_file_operations = {
	.mmap	= shm_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = shmem_get_unmapped_area,
#endif
};

static struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.nopage	= shmem_nopage,
#if defined(CONFIG_NUMA) && defined(CONFIG_SHMEM)
	.set_policy = shmem_set_policy,
	.get_policy = shmem_get_policy,
#endif
};
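
/*
 * newseg - allocate a new, unattached shared memory segment.
 * Charges shm_tot, sets up the backing shmem (or hugetlb) file and
 * returns the new segment id, or a negative error code.
 * Called with shm_ids.mutex held.
 */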
static int newseg (key_t key, int shmflg, size_t size)
{
	int error;
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;

	if (size < SHMMIN || size > shm_ctlmax)
		return -EINVAL;

	if (shm_tot + numpages >= shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	if (shmflg & SHM_HUGETLB) {
		/* hugetlb_zero_setup takes care of mlock user accounting */
		file = hugetlb_zero_setup(size);
		shp->mlock_user = current->user;
	} else {
		int acctflag = VM_ACCOUNT;
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
		    sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = 0;
		sprintf (name, "SYSV%08x", key);
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	error = -ENOSPC;
	id = shm_addid(shp);
	if (id == -1)
		goto no_id;

	shp->shm_cprid = current->tgid;
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->id = shm_buildid(id, shp->shm_perm.seq);
	shp->shm_file = file;
	file->f_dentry->d_inode->i_ino = shp->id;

	/* Hugetlb ops would have already been assigned. */
	if (!(shmflg & SHM_HUGETLB))
		file->f_op = &shm_file_operations;

	shm_tot += numpages;
	shm_unlock(shp);
	return shp->id;

no_id:
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}
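
/*
 * shmget(2): for IPC_PRIVATE always create a fresh segment; otherwise
 * look the key up and either create it (IPC_CREAT), fail (IPC_CREAT and
 * IPC_EXCL on an existing key), or return the existing segment's id after
 * size and permission checks.
 */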
asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
{
	struct shmid_kernel *shp;
	int err, id = 0;

	mutex_lock(&shm_ids.mutex);
	if (key == IPC_PRIVATE) {
		err = newseg(key, shmflg, size);
	} else if ((id = ipc_findkey(&shm_ids, key)) == -1) {
		if (!(shmflg & IPC_CREAT))
			err = -ENOENT;
		else
			err = newseg(key, shmflg, size);
	} else if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL)) {
		err = -EEXIST;
	} else {
		shp = shm_lock(id);
		BUG_ON(shp == NULL);
		if (shp->shm_segsz < size)
			err = -EINVAL;
		else if (ipcperms(&shp->shm_perm, shmflg))
			err = -EACCES;
		else {
			int shmid = shm_buildid(id, shp->shm_perm.seq);
			err = security_shm_associate(shp, shmflg);
			if (!err)
				err = shmid;
		}
		shm_unlock(shp);
	}
	mutex_unlock(&shm_ids.mutex);

	return err;
}
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

struct shm_setbuf {
	uid_t	uid;
	gid_t	gid;
	mode_t	mode;
};

static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
	    {
		struct shmid64_ds tbuf;

		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->uid	= tbuf.shm_perm.uid;
		out->gid	= tbuf.shm_perm.gid;
		out->mode	= tbuf.shm_perm.mode;

		return 0;
	    }
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid	= tbuf_old.shm_perm.uid;
		out->gid	= tbuf_old.shm_perm.gid;
		out->mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
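
/*
 * shm_get_stat - sum resident and swapped pages over all segments,
 * for SHM_INFO.  Called with shm_ids.mutex held.
 */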
static void shm_get_stat(unsigned long *rss, unsigned long *swp)
{
	int i;

	*rss = 0;
	*swp = 0;

	for (i = 0; i <= shm_ids.max_id; i++) {
		struct shmid_kernel *shp;
		struct inode *inode;

		shp = shm_get(i);
		if (!shp)
			continue;

		inode = shp->shm_file->f_dentry->d_inode;

		if (is_file_hugepages(shp->shm_file)) {
			struct address_space *mapping = inode->i_mapping;
			*rss += (HPAGE_SIZE/PAGE_SIZE)*mapping->nrpages;
		} else {
			struct shmem_inode_info *info = SHMEM_I(inode);
			spin_lock(&info->lock);
			*rss += inode->i_mapping->nrpages;
			*swp += info->swapped;
			spin_unlock(&info->lock);
		}
	}
}
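
/*
 * shmctl(2): IPC_INFO and SHM_INFO report limits and usage, SHM_STAT and
 * IPC_STAT copy out one segment's state, SHM_LOCK/SHM_UNLOCK lock the
 * segment's pages in core or release them, IPC_RMID marks the segment for
 * destruction, and IPC_SET updates owner and mode.
 */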
asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
{
	struct shm_setbuf setbuf;
	struct shmid_kernel *shp;
	int err, version;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = shm_ctlmni;
		shminfo.shmmax = shm_ctlmax;
		shminfo.shmall = shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user (buf, &shminfo, version))
			return -EFAULT;
		/* reading an integer is always atomic */
		err = shm_ids.max_id;
		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info, 0, sizeof(shm_info));
		mutex_lock(&shm_ids.mutex);
		shm_info.used_ids = shm_ids.in_use;
		shm_get_stat (&shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = shm_ids.max_id;
		mutex_unlock(&shm_ids.mutex);
		if (copy_to_user (buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		memset(&tbuf, 0, sizeof(tbuf));
		shp = shm_lock(shmid);
		if (shp == NULL) {
			err = -EINVAL;
			goto out;
		} else if (cmd == SHM_STAT) {
			err = -EINVAL;
			if (shmid > shm_ids.max_id)
				goto out_unlock;
			result = shm_buildid(shmid, shp->shm_perm.seq);
		} else {
			err = shm_checkid(shp, shmid);
			if (err)
				goto out_unlock;
			result = 0;
		}
		err = -EACCES;
		if (ipcperms (&shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		if (!is_file_hugepages(shp->shm_file))
			tbuf.shm_nattch	= shp->shm_nattch;
		else
			tbuf.shm_nattch = file_count(shp->shm_file) - 1;
		shm_unlock(shp);
		if (copy_shmid_to_user (buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		shp = shm_lock(shmid);
		if (shp == NULL) {
			err = -EINVAL;
			goto out;
		}
		err = shm_checkid(shp, shmid);
		if (err)
			goto out_unlock;

		if (!capable(CAP_IPC_LOCK)) {
			err = -EPERM;
			if (current->euid != shp->shm_perm.uid &&
			    current->euid != shp->shm_perm.cuid)
				goto out_unlock;
			if (cmd == SHM_LOCK &&
			    !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current->user;
			if (!is_file_hugepages(shp->shm_file)) {
				err = shmem_lock(shp->shm_file, 1, user);
				if (!err) {
					shp->shm_perm.mode |= SHM_LOCKED;
					shp->mlock_user = user;
				}
			}
		} else if (!is_file_hugepages(shp->shm_file)) {
			shmem_lock(shp->shm_file, 0, shp->mlock_user);
			shp->shm_perm.mode &= ~SHM_LOCKED;
			shp->mlock_user = NULL;
		}
		shm_unlock(shp);
		goto out;
	}
	case IPC_RMID:
	{
		/*
		 *	We cannot simply remove the file. The SVID states
		 *	that the block remains until the last person
		 *	detaches from it, then is deleted. A shmat() on
		 *	an RMID segment is legal in older Linux and if
		 *	we change it apps break...
		 *
		 *	Instead we set a destroyed flag, and then blow
		 *	the name away when the usage hits zero.
		 */
		mutex_lock(&shm_ids.mutex);
		shp = shm_lock(shmid);
		err = -EINVAL;
		if (shp == NULL)
			goto out_up;
		err = shm_checkid(shp, shmid);
		if (err)
			goto out_unlock_up;

		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			err = -EPERM;
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		if (shp->shm_nattch) {
			shp->shm_perm.mode |= SHM_DEST;
			/* Do not find it any more */
			shp->shm_perm.key = IPC_PRIVATE;
			shm_unlock(shp);
		} else
			shm_destroy (shp);
		mutex_unlock(&shm_ids.mutex);
		goto out;
	}

	case IPC_SET:
	{
		if (copy_shmid_from_user (&setbuf, buf, version)) {
			err = -EFAULT;
			goto out;
		}
		mutex_lock(&shm_ids.mutex);
		shp = shm_lock(shmid);
		err = -EINVAL;
		if (shp == NULL)
			goto out_up;
		if ((err = audit_ipc_perms(0, setbuf.uid, setbuf.gid,
					setbuf.mode, &(shp->shm_perm))))
			goto out_unlock_up;
		err = shm_checkid(shp, shmid);
		if (err)
			goto out_unlock_up;
		err = -EPERM;
		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		shp->shm_perm.uid = setbuf.uid;
		shp->shm_perm.gid = setbuf.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~S_IRWXUGO)
			| (setbuf.mode & S_IRWXUGO);
		shp->shm_ctim = get_seconds();
		break;
	}

	default:
		err = -EINVAL;
		goto out;
	}

	err = 0;
out_unlock_up:
	shm_unlock(shp);
out_up:
	mutex_unlock(&shm_ids.mutex);
	goto out;
out_unlock:
	shm_unlock(shp);
out:
	return err;
}
/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file *file;
	int    err;
	unsigned long flags;
	unsigned long prot;
	unsigned long o_flags;
	int acc_mode;
	void *user_addr;

	if (shmid < 0) {
		err = -EINVAL;
		goto out;
	} else if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA-1)) {
			if (shmflg & SHM_RND)
				addr &= ~(SHMLBA-1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					return -EINVAL;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			return -EINVAL;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		o_flags = O_RDONLY;
		acc_mode = S_IRUGO;
	} else {
		prot = PROT_READ | PROT_WRITE;
		o_flags = O_RDWR;
		acc_mode = S_IRUGO | S_IWUGO;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	shp = shm_lock(shmid);
	if (shp == NULL) {
		err = -EINVAL;
		goto out;
	}
	err = shm_checkid(shp, shmid);
	if (err) {
		shm_unlock(shp);
		goto out;
	}
	if (ipcperms(&shp->shm_perm, acc_mode)) {
		shm_unlock(shp);
		err = -EACCES;
		goto out;
	}

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err) {
		shm_unlock(shp);
		return err;
	}

	file = shp->shm_file;
	size = i_size_read(file->f_dentry->d_inode);
	shp->shm_nattch++;
	shm_unlock(shp);

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		user_addr = ERR_PTR(-EINVAL);
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = (void *) do_mmap (file, addr, size, prot, flags, 0);

invalid:
	up_write(&current->mm->mmap_sem);

	mutex_lock(&shm_ids.mutex);
	shp = shm_lock(shmid);
	BUG_ON(!shp);
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy (shp);
	else
		shm_unlock(shp);
	mutex_unlock(&shm_ids.mutex);

	*raddr = (unsigned long) user_addr;
	err = 0;
	if (IS_ERR(user_addr))
		err = PTR_ERR(user_addr);
out:
	return err;
}
asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}
/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
asmlinkage long sys_shmdt(char __user *shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *next;
	unsigned long addr = (unsigned long)shmaddr;
	loff_t size = 0;
	int retval = -EINVAL;

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: it searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops || is_vm_hugetlb_page(vma)) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			size = vma->vm_file->f_dentry->d_inode->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops || is_vm_hugetlb_page(vma)) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

	up_write(&mm->mmap_sem);
	return retval;
}
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct shmid_kernel *shp = it;
	char *format;

#define SMALL_STRING "%10d %10d %4o %10u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
#define BIG_STRING "%10d %10d %4o %21u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"

	if (sizeof(size_t) <= sizeof(int))
		format = SMALL_STRING;
	else
		format = BIG_STRING;
	return seq_printf(s, format,
			  shp->shm_perm.key,
			  shp->id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  is_file_hugepages(shp->shm_file) ? (file_count(shp->shm_file) - 1) : shp->shm_nattch,
			  shp->shm_perm.uid,
			  shp->shm_perm.gid,
			  shp->shm_perm.cuid,
			  shp->shm_perm.cgid,
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim);
}
#endif
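
/*
 * Illustrative userspace sequence exercising the syscalls implemented
 * above (a minimal sketch, not part of this file; error checking omitted):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);	// attaches via shm_mmap()
 *	shmctl(id, IPC_RMID, NULL);	// sets SHM_DEST; segment persists
 *	p[0] = 'x';			// still usable while attached
 *	shmdt(p);			// last detach -> shm_destroy()
 */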