  1. /*
  2. * linux/ipc/util.c
  3. * Copyright (C) 1992 Krishna Balasubramanian
  4. *
  5. * Sep 1997 - Call suser() last after "normal" permission checks so we
  6. * get BSD style process accounting right.
  7. * Occurs in several places in the IPC code.
  8. * Chris Evans, <chris@ferret.lmh.ox.ac.uk>
  9. * Nov 1999 - ipc helper functions, unified SMP locking
  10. * Manfred Spraul <manfred@colorfullife.com>
  11. * Oct 2002 - One lock per IPC id. RCU ipc_free for lock-free grow_ary().
  12. * Mingming Cao <cmm@us.ibm.com>
  13. * Mar 2006 - support for audit of ipc object properties
  14. * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 * Jun 2006 - namespaces support
  16. * OpenVZ, SWsoft Inc.
  17. * Pavel Emelianov <xemul@openvz.org>
  18. */
  19. #include <linux/mm.h>
  20. #include <linux/shm.h>
  21. #include <linux/init.h>
  22. #include <linux/msg.h>
  23. #include <linux/vmalloc.h>
  24. #include <linux/slab.h>
  25. #include <linux/capability.h>
  26. #include <linux/highuid.h>
  27. #include <linux/security.h>
  28. #include <linux/rcupdate.h>
  29. #include <linux/workqueue.h>
  30. #include <linux/seq_file.h>
  31. #include <linux/proc_fs.h>
  32. #include <linux/audit.h>
  33. #include <linux/nsproxy.h>
  34. #include <linux/rwsem.h>
  35. #include <linux/memory.h>
  36. #include <linux/ipc_namespace.h>
  37. #include <asm/unistd.h>
  38. #include "util.h"
/*
 * Describes one procfs interface for a SysV IPC type: where the file
 * lives, the banner printed first, which ids table to walk, and how to
 * render a single entry.  Instances are allocated by
 * ipc_init_proc_interface() and handed to the seq_file machinery.
 */
struct ipc_proc_iface {
	const char *path;	/* path of the proc file */
	const char *header;	/* banner emitted before the entries */
	int ids;		/* index into the namespace's ids[] array */
	int (*show)(struct seq_file *, void *);	/* per-entry formatter */
};
/*
 * The initial IPC namespace.
 * NOTE(review): refcount starts at 2 rather than 1 — presumably one
 * reference for the namespace being live plus one held externally
 * (e.g. by init's nsproxy); confirm against the namespace setup code.
 */
struct ipc_namespace init_ipc_ns = {
	.kref = {
		.refcount	= ATOMIC_INIT(2),
	},
};

/* Number of live IPC namespaces; the initial namespace counts as one. */
atomic_t nr_ipc_ns = ATOMIC_INIT(1);
#ifdef CONFIG_MEMORY_HOTPLUG

/* Deferred work: fire the ipcns notifier chain outside the hotplug path. */
static void ipc_memory_notifier(struct work_struct *work)
{
	ipcns_notify(IPCNS_MEMCHANGED);
}

static DECLARE_WORK(ipc_memory_wq, ipc_memory_notifier);

/*
 * Memory hotplug callback: after memory is successfully added or
 * removed, queue work that tells every ipc namespace to recompute
 * msgmni (which scales with lowmem).
 */
static int ipc_memory_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:    /* memory successfully brought online */
	case MEM_OFFLINE:   /* or offline: it's time to recompute msgmni */
		/*
		 * This is done by invoking the ipcns notifier chain with the
		 * IPC_MEMCHANGED event.
		 * In order not to keep the lock on the hotplug memory chain
		 * for too long, queue a work item that will, when waken up,
		 * activate the ipcns notification chain.
		 * No need to keep several ipc work items on the queue.
		 */
		if (!work_pending(&ipc_memory_wq))
			schedule_work(&ipc_memory_wq);
		break;
	case MEM_GOING_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_ONLINE:
	case MEM_CANCEL_OFFLINE:
	default:
		/* Transitional or cancelled states: nothing to recompute. */
		break;
	}

	return NOTIFY_OK;
}

#endif /* CONFIG_MEMORY_HOTPLUG */
/**
 *	ipc_init	-	initialise IPC subsystem
 *
 *	The various System V IPC resources (semaphores, messages and shared
 *	memory) are initialised.
 *	A callback routine is registered into the memory hotplug notifier
 *	chain: since msgmni scales to lowmem this callback routine will be
 *	called upon successful memory add / remove to recompute msgmni.
 */
static int __init ipc_init(void)
{
	sem_init();
	msg_init();
	shm_init();
	/* no-op stub when CONFIG_MEMORY_HOTPLUG is disabled */
	hotplug_memory_notifier(ipc_memory_callback, IPC_CALLBACK_PRI);
	register_ipcns_notifier(&init_ipc_ns);
	return 0;
}
__initcall(ipc_init);
  103. /**
  104. * ipc_init_ids - initialise IPC identifiers
  105. * @ids: Identifier set
  106. *
  107. * Set up the sequence range to use for the ipc identifier range (limited
  108. * below IPCMNI) then initialise the ids idr.
  109. */
  110. void ipc_init_ids(struct ipc_ids *ids)
  111. {
  112. init_rwsem(&ids->rw_mutex);
  113. ids->in_use = 0;
  114. ids->seq = 0;
  115. {
  116. int seq_limit = INT_MAX/SEQ_MULTIPLIER;
  117. if (seq_limit > USHORT_MAX)
  118. ids->seq_max = USHORT_MAX;
  119. else
  120. ids->seq_max = seq_limit;
  121. }
  122. idr_init(&ids->ipcs_idr);
  123. }
#ifdef CONFIG_PROC_FS
static const struct file_operations sysvipc_proc_fops;

/**
 *	ipc_init_proc_interface	-  Create a proc interface for sysipc types using a seq_file interface.
 *	@path: Path in procfs
 *	@header: Banner to be printed at the beginning of the file.
 *	@ids: ipc id table to iterate.
 *	@show: show routine.
 *
 *	Failure is silent: if the iface cannot be allocated or the proc
 *	entry cannot be created, the /proc file simply does not appear.
 */
void __init ipc_init_proc_interface(const char *path, const char *header,
		int ids, int (*show)(struct seq_file *, void *))
{
	struct proc_dir_entry *pde;
	struct ipc_proc_iface *iface;

	iface = kmalloc(sizeof(*iface), GFP_KERNEL);
	if (!iface)
		return;
	iface->path	= path;
	iface->header	= header;
	iface->ids	= ids;
	iface->show	= show;

	/* iface becomes the proc entry's ->data, retrieved via PDE() at open */
	pde = proc_create_data(path,
			       S_IRUGO,		/* world readable */
			       NULL,		/* parent dir */
			       &sysvipc_proc_fops,
			       iface);
	if (!pde) {
		kfree(iface);
	}
}
#endif
/**
 *	ipc_findkey	-	find a key in an ipc identifier set
 *	@ids: Identifier set
 *	@key: The key to find
 *
 *	Requires ipc_ids.rw_mutex locked.
 *	Returns the LOCKED pointer to the ipc structure if found or NULL
 *	if not.
 *	If key is found ipc points to the owning ipc structure
 */
static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
{
	struct kern_ipc_perm *ipc;
	int next_id;
	int total;

	/*
	 * The idr may be sparse: probe ids upwards, counting only live
	 * entries, and stop once all in_use entries have been inspected.
	 */
	for (total = 0, next_id = 0; total < ids->in_use; next_id++) {
		ipc = idr_find(&ids->ipcs_idr, next_id);

		if (ipc == NULL)
			continue;	/* hole in the idr */

		if (ipc->key != key) {
			total++;
			continue;
		}

		/* Match: return with the per-object lock held. */
		ipc_lock_by_ptr(ipc);
		return ipc;
	}

	return NULL;
}
  183. /**
  184. * ipc_get_maxid - get the last assigned id
  185. * @ids: IPC identifier set
  186. *
  187. * Called with ipc_ids.rw_mutex held.
  188. */
  189. int ipc_get_maxid(struct ipc_ids *ids)
  190. {
  191. struct kern_ipc_perm *ipc;
  192. int max_id = -1;
  193. int total, id;
  194. if (ids->in_use == 0)
  195. return -1;
  196. if (ids->in_use == IPCMNI)
  197. return IPCMNI - 1;
  198. /* Look for the last assigned id */
  199. total = 0;
  200. for (id = 0; id < IPCMNI && total < ids->in_use; id++) {
  201. ipc = idr_find(&ids->ipcs_idr, id);
  202. if (ipc != NULL) {
  203. max_id = id;
  204. total++;
  205. }
  206. }
  207. return max_id;
  208. }
/**
 *	ipc_addid 	-	add an IPC identifier
 *	@ids: IPC identifier set
 *	@new: new IPC permission set
 *	@size: limit for the number of used ids
 *
 *	Add an entry 'new' to the IPC ids idr. The permissions object is
 *	initialised and the first free entry is set up and the id assigned
 *	is returned. The 'new' entry is returned in a locked state on success.
 *	On failure the entry is not locked and a negative err-code is returned.
 *
 *	Called with ipc_ids.rw_mutex held as a writer.
 */
int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
{
	int id, err;

	/* Never allow more than IPCMNI ids, regardless of the caller's limit. */
	if (size > IPCMNI)
		size = IPCMNI;

	if (ids->in_use >= size)
		return -ENOSPC;

	/* Caller must have called idr_pre_get() beforehand (see ipcget_*). */
	err = idr_get_new(&ids->ipcs_idr, new, &id);
	if (err)
		return err;

	ids->in_use++;

	/* The creator's effective ids become both owner and creator ids. */
	new->cuid = new->uid = current->euid;
	new->gid = new->cgid = current->egid;

	/* Per-set sequence number, wrapped at seq_max, mixed into the id. */
	new->seq = ids->seq++;
	if(ids->seq > ids->seq_max)
		ids->seq = 0;

	new->id = ipc_buildid(id, new->seq);
	spin_lock_init(&new->lock);
	new->deleted = 0;
	/*
	 * Return with the rcu read section open and the object spinlock
	 * held — the same locked state ipc_lock() leaves on success.
	 */
	rcu_read_lock();
	spin_lock(&new->lock);
	return id;
}
/**
 *	ipcget_new	-	create a new ipc object
 *	@ns: namespace
 *	@ids: IPC identifer set
 *	@ops: the actual creation routine to call
 *	@params: its parameters
 *
 *	This routine is called by sys_msgget, sys_semget() and sys_shmget()
 *	when the key is IPC_PRIVATE.
 */
static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
		struct ipc_ops *ops, struct ipc_params *params)
{
	int err;
retry:
	/* idr_pre_get() returns 0 on allocation failure (note: not an errno) */
	err = idr_pre_get(&ids->ipcs_idr, GFP_KERNEL);

	if (!err)
		return -ENOMEM;

	down_write(&ids->rw_mutex);
	err = ops->getnew(ns, params);
	up_write(&ids->rw_mutex);

	/* -EAGAIN means the preallocated idr layer was consumed: refill. */
	if (err == -EAGAIN)
		goto retry;

	return err;
}
  270. /**
  271. * ipc_check_perms - check security and permissions for an IPC
  272. * @ipcp: ipc permission set
  273. * @ops: the actual security routine to call
  274. * @params: its parameters
  275. *
  276. * This routine is called by sys_msgget(), sys_semget() and sys_shmget()
  277. * when the key is not IPC_PRIVATE and that key already exists in the
  278. * ids IDR.
  279. *
  280. * On success, the IPC id is returned.
  281. *
  282. * It is called with ipc_ids.rw_mutex and ipcp->lock held.
  283. */
  284. static int ipc_check_perms(struct kern_ipc_perm *ipcp, struct ipc_ops *ops,
  285. struct ipc_params *params)
  286. {
  287. int err;
  288. if (ipcperms(ipcp, params->flg))
  289. err = -EACCES;
  290. else {
  291. err = ops->associate(ipcp, params->flg);
  292. if (!err)
  293. err = ipcp->id;
  294. }
  295. return err;
  296. }
/**
 *	ipcget_public	-	get an ipc object or create a new one
 *	@ns: namespace
 *	@ids: IPC identifer set
 *	@ops: the actual creation routine to call
 *	@params: its parameters
 *
 *	This routine is called by sys_msgget, sys_semget() and sys_shmget()
 *	when the key is not IPC_PRIVATE.
 *	It adds a new entry if the key is not found and does some permission
 *      / security checkings if the key is found.
 *
 *	On success, the ipc id is returned.
 */
static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
		struct ipc_ops *ops, struct ipc_params *params)
{
	struct kern_ipc_perm *ipcp;
	int flg = params->flg;
	int err;
retry:
	/*
	 * err temporarily holds the idr_pre_get() result (1 = ok, 0 = no
	 * memory); it is only consulted on the "create new" path below and
	 * is overwritten everywhere else.
	 */
	err = idr_pre_get(&ids->ipcs_idr, GFP_KERNEL);

	/*
	 * Take the lock as a writer since we are potentially going to add
	 * a new entry + read locks are not "upgradable"
	 */
	down_write(&ids->rw_mutex);
	ipcp = ipc_findkey(ids, params->key);
	if (ipcp == NULL) {
		/* key not used */
		if (!(flg & IPC_CREAT))
			err = -ENOENT;
		else if (!err)
			err = -ENOMEM;		/* idr_pre_get() failed above */
		else
			err = ops->getnew(ns, params);
	} else {
		/* ipc object has been locked by ipc_findkey() */

		if (flg & IPC_CREAT && flg & IPC_EXCL)
			err = -EEXIST;
		else {
			err = 0;
			if (ops->more_checks)
				err = ops->more_checks(ipcp, params);
			if (!err)
				/*
				 * ipc_check_perms returns the IPC id on
				 * success
				 */
				err = ipc_check_perms(ipcp, ops, params);
		}
		ipc_unlock(ipcp);
	}
	up_write(&ids->rw_mutex);

	/* -EAGAIN means the preallocated idr layer was consumed: refill. */
	if (err == -EAGAIN)
		goto retry;

	return err;
}
  355. /**
  356. * ipc_rmid - remove an IPC identifier
  357. * @ids: IPC identifier set
  358. * @ipcp: ipc perm structure containing the identifier to remove
  359. *
  360. * ipc_ids.rw_mutex (as a writer) and the spinlock for this ID are held
  361. * before this function is called, and remain locked on the exit.
  362. */
  363. void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
  364. {
  365. int lid = ipcid_to_idx(ipcp->id);
  366. idr_remove(&ids->ipcs_idr, lid);
  367. ids->in_use--;
  368. ipcp->deleted = 1;
  369. return;
  370. }
  371. /**
  372. * ipc_alloc - allocate ipc space
  373. * @size: size desired
  374. *
  375. * Allocate memory from the appropriate pools and return a pointer to it.
  376. * NULL is returned if the allocation fails
  377. */
  378. void* ipc_alloc(int size)
  379. {
  380. void* out;
  381. if(size > PAGE_SIZE)
  382. out = vmalloc(size);
  383. else
  384. out = kmalloc(size, GFP_KERNEL);
  385. return out;
  386. }
  387. /**
  388. * ipc_free - free ipc space
  389. * @ptr: pointer returned by ipc_alloc
  390. * @size: size of block
  391. *
  392. * Free a block created with ipc_alloc(). The caller must know the size
  393. * used in the allocation call.
  394. */
  395. void ipc_free(void* ptr, int size)
  396. {
  397. if(size > PAGE_SIZE)
  398. vfree(ptr);
  399. else
  400. kfree(ptr);
  401. }
/*
 * rcu allocations:
 * There are three headers that are prepended to the actual allocation:
 * - during use: ipc_rcu_hdr.
 * - during the rcu grace period: ipc_rcu_grace.
 * - [only if vmalloc]: ipc_rcu_sched.
 * Their lifetime doesn't overlap, thus the headers share the same memory.
 * Unlike a normal union, they are right-aligned, thus some container_of
 * forward/backward casting is necessary:
 */
struct ipc_rcu_hdr
{
	int refcount;		/* not atomic; see ipc_rcu_putref() */
	int is_vmalloc;		/* selects the free path in ipc_rcu_putref() */
	void *data[0];
};

struct ipc_rcu_grace
{
	struct rcu_head rcu;
	/* "void *" makes sure alignment of following data is sane. */
	void *data[0];
};

struct ipc_rcu_sched
{
	struct work_struct work;
	/* "void *" makes sure alignment of following data is sane. */
	void *data[0];
};

/* Prepended header length: the largest header that may occupy the slot. */
#define HDRLEN_KMALLOC		(sizeof(struct ipc_rcu_grace) > sizeof(struct ipc_rcu_hdr) ? \
					sizeof(struct ipc_rcu_grace) : sizeof(struct ipc_rcu_hdr))
#define HDRLEN_VMALLOC		(sizeof(struct ipc_rcu_sched) > HDRLEN_KMALLOC ? \
					sizeof(struct ipc_rcu_sched) : HDRLEN_KMALLOC)
  434. static inline int rcu_use_vmalloc(int size)
  435. {
  436. /* Too big for a single page? */
  437. if (HDRLEN_KMALLOC + size > PAGE_SIZE)
  438. return 1;
  439. return 0;
  440. }
/**
 *	ipc_rcu_alloc	-	allocate ipc and rcu space
 *	@size: size desired
 *
 *	Allocate memory for the rcu header structure +  the object.
 *	Returns the pointer to the object.
 *	NULL is returned if the allocation fails.
 *
 *	The returned pointer addresses the caller's object; the rcu/refcount
 *	header lives immediately before it and is reached via container_of
 *	(see ipc_rcu_getref/putref).  Initial refcount is 1.
 */
void* ipc_rcu_alloc(int size)
{
	void* out;
	/*
	 * We prepend the allocation with the rcu struct, and
	 * workqueue if necessary (for vmalloc).
	 */
	if (rcu_use_vmalloc(size)) {
		out = vmalloc(HDRLEN_VMALLOC + size);
		if (out) {
			/* skip past the header; callers never see it */
			out += HDRLEN_VMALLOC;
			container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
			container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
		}
	} else {
		out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
		if (out) {
			out += HDRLEN_KMALLOC;
			container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
			container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
		}
	}

	return out;
}
  473. void ipc_rcu_getref(void *ptr)
  474. {
  475. container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
  476. }
/*
 * Work handler that vfrees a vmalloc'd ipc allocation.
 * NOTE(review): this assumes sizeof(struct ipc_rcu_sched) equals
 * HDRLEN_VMALLOC, i.e. that the sched header starts exactly at the
 * address vmalloc() returned; if HDRLEN_KMALLOC were ever the larger
 * of the two, vfree() would receive an interior pointer — confirm.
 */
static void ipc_do_vfree(struct work_struct *work)
{
	vfree(container_of(work, struct ipc_rcu_sched, work));
}
/**
 *	ipc_schedule_free	- free ipc + rcu space
 *	@head: RCU callback structure for queued work
 *
 *	Since RCU callback function is called in bh,
 *	we need to defer the vfree to schedule_work().
 *
 *	The grace header has served its purpose once we get here, so its
 *	memory is reused in place as the work header (they are both
 *	right-aligned against the same data[] payload).
 */
static void ipc_schedule_free(struct rcu_head *head)
{
	struct ipc_rcu_grace *grace;
	struct ipc_rcu_sched *sched;

	grace = container_of(head, struct ipc_rcu_grace, rcu);
	sched = container_of(&(grace->data[0]), struct ipc_rcu_sched,
				data[0]);

	INIT_WORK(&sched->work, ipc_do_vfree);
	schedule_work(&sched->work);
}
  498. /**
  499. * ipc_immediate_free - free ipc + rcu space
  500. * @head: RCU callback structure that contains pointer to be freed
  501. *
  502. * Free from the RCU callback context.
  503. */
  504. static void ipc_immediate_free(struct rcu_head *head)
  505. {
  506. struct ipc_rcu_grace *free =
  507. container_of(head, struct ipc_rcu_grace, rcu);
  508. kfree(free);
  509. }
/*
 * Drop a reference taken by ipc_rcu_alloc()/ipc_rcu_getref().  The
 * decrement is not atomic — callers are expected to serialize, same as
 * ipc_rcu_getref().  When the count hits zero the object is freed after
 * an RCU grace period, via the path matching its allocator.
 */
void ipc_rcu_putref(void *ptr)
{
	if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
		return;

	if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) {
		/* vfree cannot run in bh context: defer through a workqueue */
		call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu,
				ipc_schedule_free);
	} else {
		call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu,
				ipc_immediate_free);
	}
}
/**
 *	ipcperms	-	check IPC permissions
 *	@ipcp: IPC permission set
 *	@flag: desired permission set.
 *
 *	Check user, group, other permissions for access
 *	to ipc resources. return 0 if allowed
 */
int ipcperms (struct kern_ipc_perm *ipcp, short flag)
{	/* flag will most probably be 0 or S_...UGO from <linux/stat.h> */
	int requested_mode, granted_mode, err;

	if (unlikely((err = audit_ipc_obj(ipcp))))
		return err;
	/* Fold the u/g/o request bits down into the low three bits. */
	requested_mode = (flag >> 6) | (flag >> 3) | flag;
	granted_mode = ipcp->mode;
	/* Shift the applicable class (owner/group/other) into place. */
	if (current->euid == ipcp->cuid || current->euid == ipcp->uid)
		granted_mode >>= 6;
	else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
		granted_mode >>= 3;
	/* is there some bit set in requested_mode but not in granted_mode? */
	if ((requested_mode & ~granted_mode & 0007) &&
	    !capable(CAP_IPC_OWNER))
		return -1;

	return security_ipc_permission(ipcp, flag);
}
/*
 * Functions to convert between the kern_ipc_perm structure and the
 * old/new ipc_perm structures
 */

/**
 *	kernel_to_ipc64_perm	-	convert kernel ipc permissions to user
 *	@in: kernel permissions
 *	@out: new style IPC permissions
 *
 *	Turn the kernel object @in into a set of permissions descriptions
 *	for returning to userspace (@out).
 *	Note: only the listed fields are written; any other members of
 *	@out are left untouched and must be cleared by the caller.
 */
void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out)
{
	out->key	= in->key;
	out->uid	= in->uid;
	out->gid	= in->gid;
	out->cuid	= in->cuid;
	out->cgid	= in->cgid;
	out->mode	= in->mode;
	out->seq	= in->seq;
}
/**
 *	ipc64_perm_to_ipc_perm	-	convert new ipc permissions to old
 *	@in: new style IPC permissions
 *	@out: old style IPC permissions
 *
 *	Turn the new style permissions object @in into a compatibility
 *	object and store it into the @out pointer.
 *	The SET_UID/SET_GID macros handle narrowing the ids to the old
 *	(16-bit era) fields — presumably clamping values that overflow;
 *	confirm against <linux/highuid.h>.
 */
void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out)
{
	out->key	= in->key;
	SET_UID(out->uid, in->uid);
	SET_GID(out->gid, in->gid);
	SET_UID(out->cuid, in->cuid);
	SET_GID(out->cgid, in->cgid);
	out->mode	= in->mode;
	out->seq	= in->seq;
}
/**
 * ipc_lock - Lock an ipc structure without rw_mutex held
 * @ids: IPC identifier set
 * @id: ipc id to look for
 *
 * Look for an id in the ipc ids idr and lock the associated ipc object.
 *
 * The ipc object is locked on exit.
 *
 * This is the routine that should be called when the rw_mutex is not already
 * held, i.e. idr tree not protected: it protects the idr tree in read mode
 * during the idr_find().
 *
 * On success the rcu read section opened here is left open together with
 * the object spinlock — presumably both are dropped by ipc_unlock();
 * confirm against util.h.
 */
struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
{
	struct kern_ipc_perm *out;
	int lid = ipcid_to_idx(id);

	down_read(&ids->rw_mutex);

	rcu_read_lock();
	out = idr_find(&ids->ipcs_idr, lid);
	if (out == NULL) {
		rcu_read_unlock();
		up_read(&ids->rw_mutex);
		return ERR_PTR(-EINVAL);
	}

	/* rcu keeps *out alive from here on; the idr may change again. */
	up_read(&ids->rw_mutex);

	spin_lock(&out->lock);
	
	/* ipc_rmid() may have already freed the ID while ipc_lock
	 * was spinning: here verify that the structure is still valid
	 */
	if (out->deleted) {
		spin_unlock(&out->lock);
		rcu_read_unlock();
		return ERR_PTR(-EINVAL);
	}

	return out;
}
/**
 * ipc_lock_down - Lock an ipc structure with rw_sem held
 * @ids: IPC identifier set
 * @id: ipc id to look for
 *
 * Look for an id in the ipc ids idr and lock the associated ipc object.
 *
 * The ipc object is locked on exit.
 *
 * This is the routine that should be called when the rw_mutex is already
 * held, i.e. idr tree protected.
 *
 * As with ipc_lock(), the rcu read section opened here is left open on
 * success together with the object spinlock.
 */
struct kern_ipc_perm *ipc_lock_down(struct ipc_ids *ids, int id)
{
	struct kern_ipc_perm *out;
	int lid = ipcid_to_idx(id);

	rcu_read_lock();
	out = idr_find(&ids->ipcs_idr, lid);
	if (out == NULL) {
		rcu_read_unlock();
		return ERR_PTR(-EINVAL);
	}

	spin_lock(&out->lock);

	/*
	 * No need to verify that the structure is still valid since the
	 * rw_mutex is held.
	 */
	return out;
}
  653. struct kern_ipc_perm *ipc_lock_check_down(struct ipc_ids *ids, int id)
  654. {
  655. struct kern_ipc_perm *out;
  656. out = ipc_lock_down(ids, id);
  657. if (IS_ERR(out))
  658. return out;
  659. if (ipc_checkid(out, id)) {
  660. ipc_unlock(out);
  661. return ERR_PTR(-EIDRM);
  662. }
  663. return out;
  664. }
  665. struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id)
  666. {
  667. struct kern_ipc_perm *out;
  668. out = ipc_lock(ids, id);
  669. if (IS_ERR(out))
  670. return out;
  671. if (ipc_checkid(out, id)) {
  672. ipc_unlock(out);
  673. return ERR_PTR(-EIDRM);
  674. }
  675. return out;
  676. }
  677. /**
  678. * ipcget - Common sys_*get() code
  679. * @ns : namsepace
  680. * @ids : IPC identifier set
  681. * @ops : operations to be called on ipc object creation, permission checks
  682. * and further checks
  683. * @params : the parameters needed by the previous operations.
  684. *
  685. * Common routine called by sys_msgget(), sys_semget() and sys_shmget().
  686. */
  687. int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
  688. struct ipc_ops *ops, struct ipc_params *params)
  689. {
  690. if (params->key == IPC_PRIVATE)
  691. return ipcget_new(ns, ids, ops, params);
  692. else
  693. return ipcget_public(ns, ids, ops, params);
  694. }
  695. /**
  696. * ipc_update_perm - update the permissions of an IPC.
  697. * @in: the permission given as input.
  698. * @out: the permission of the ipc to set.
  699. */
  700. void ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
  701. {
  702. out->uid = in->uid;
  703. out->gid = in->gid;
  704. out->mode = (out->mode & ~S_IRWXUGO)
  705. | (in->mode & S_IRWXUGO);
  706. }
/**
 * ipcctl_pre_down - retrieve an ipc and check permissions for some IPC_XXX cmd
 * @ids:  the table of ids where to look for the ipc
 * @id:   the id of the ipc to retrieve
 * @cmd:  the cmd to check
 * @perm: the permission to set
 * @extra_perm: one extra permission parameter used by msq
 *
 * This function does some common audit and permissions check for some IPC_XXX
 * cmd and is called from semctl_down, shmctl_down and msgctl_down.
 * It must be called without any lock held and
 *  - retrieves the ipc with the given id in the given table.
 *  - performs some audit and permission check, depending on the given cmd
 *  - returns the ipc with both ipc and rw_mutex locks held in case of success
 *    or an err-code without any lock held otherwise.
 */
struct kern_ipc_perm *ipcctl_pre_down(struct ipc_ids *ids, int id, int cmd,
				      struct ipc64_perm *perm, int extra_perm)
{
	struct kern_ipc_perm *ipcp;
	int err;

	down_write(&ids->rw_mutex);
	ipcp = ipc_lock_check_down(ids, id);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_up;
	}

	/* Audit hooks must run before the permission decision. */
	err = audit_ipc_obj(ipcp);
	if (err)
		goto out_unlock;

	if (cmd == IPC_SET) {
		err = audit_ipc_set_perm(extra_perm, perm->uid,
					 perm->gid, perm->mode);
		if (err)
			goto out_unlock;
	}

	/* Allowed for the creator, the owner, or a privileged caller. */
	if (current->euid == ipcp->cuid ||
	    current->euid == ipcp->uid || capable(CAP_SYS_ADMIN))
		return ipcp;	/* successful lookup: both locks stay held */

	err = -EPERM;
out_unlock:
	ipc_unlock(ipcp);
out_up:
	up_write(&ids->rw_mutex);
	return ERR_PTR(err);
}
  753. #ifdef __ARCH_WANT_IPC_PARSE_VERSION
  754. /**
  755. * ipc_parse_version - IPC call version
  756. * @cmd: pointer to command
  757. *
  758. * Return IPC_64 for new style IPC and IPC_OLD for old style IPC.
  759. * The @cmd value is turned from an encoding command and version into
  760. * just the command code.
  761. */
  762. int ipc_parse_version (int *cmd)
  763. {
  764. if (*cmd & IPC_64) {
  765. *cmd ^= IPC_64;
  766. return IPC_64;
  767. } else {
  768. return IPC_OLD;
  769. }
  770. }
  771. #endif /* __ARCH_WANT_IPC_PARSE_VERSION */
#ifdef CONFIG_PROC_FS
/* Per-open state for a /proc sysvipc file: which namespace and which iface. */
struct ipc_proc_iter {
	struct ipc_namespace *ns;	/* pinned at open, released at close */
	struct ipc_proc_iface *iface;	/* static description of this file */
};
/*
 * This routine locks the ipc structure found at least at position pos.
 */
static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
					      loff_t *new_pos)
{
	struct kern_ipc_perm *ipc;
	int total, id;

	/*
	 * First pass: count how many live entries lie strictly below pos,
	 * so we can tell whether anything at/after pos can still exist.
	 */
	total = 0;
	for (id = 0; id < pos && total < ids->in_use; id++) {
		ipc = idr_find(&ids->ipcs_idr, id);
		if (ipc != NULL)
			total++;
	}

	if (total >= ids->in_use)
		return NULL;

	/* Second pass: return the first live entry at or after pos, locked. */
	for ( ; pos < IPCMNI; pos++) {
		ipc = idr_find(&ids->ipcs_idr, pos);
		if (ipc != NULL) {
			*new_pos = pos + 1;
			ipc_lock_by_ptr(ipc);
			return ipc;
		}
	}

	/* Out of range - return NULL to terminate iteration */
	return NULL;
}
  804. static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
  805. {
  806. struct ipc_proc_iter *iter = s->private;
  807. struct ipc_proc_iface *iface = iter->iface;
  808. struct kern_ipc_perm *ipc = it;
  809. /* If we had an ipc id locked before, unlock it */
  810. if (ipc && ipc != SEQ_START_TOKEN)
  811. ipc_unlock(ipc);
  812. return sysvipc_find_ipc(&iter->ns->ids[iface->ids], *pos, pos);
  813. }
/*
 * File positions: pos 0 -> header, pos n -> ipc id = n - 1.
 * SeqFile iterator: iterator value locked ipc pointer or SEQ_TOKEN_START.
 */
static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
{
	struct ipc_proc_iter *iter = s->private;
	struct ipc_proc_iface *iface = iter->iface;
	struct ipc_ids *ids;

	ids = &iter->ns->ids[iface->ids];

	/*
	 * Take the lock - this will be released by the corresponding
	 * call to stop().  It is held across the whole iteration, so the
	 * id set cannot change between start() and stop().
	 */
	down_read(&ids->rw_mutex);

	/* pos < 0 is invalid */
	if (*pos < 0)
		return NULL;

	/* pos == 0 means header */
	if (*pos == 0)
		return SEQ_START_TOKEN;

	/* Find the (pos-1)th ipc */
	return sysvipc_find_ipc(ids, *pos - 1, pos);
}
/* seq_file ->stop: drop the current entry's lock and the table lock. */
static void sysvipc_proc_stop(struct seq_file *s, void *it)
{
	struct kern_ipc_perm *ipc = it;
	struct ipc_proc_iter *iter = s->private;
	struct ipc_proc_iface *iface = iter->iface;
	struct ipc_ids *ids;

	/* If we had a locked structure, release it */
	if (ipc && ipc != SEQ_START_TOKEN)
		ipc_unlock(ipc);

	ids = &iter->ns->ids[iface->ids];
	/* Release the lock we took in start() */
	up_read(&ids->rw_mutex);
}
  851. static int sysvipc_proc_show(struct seq_file *s, void *it)
  852. {
  853. struct ipc_proc_iter *iter = s->private;
  854. struct ipc_proc_iface *iface = iter->iface;
  855. if (it == SEQ_START_TOKEN)
  856. return seq_puts(s, iface->header);
  857. return iface->show(s, it);
  858. }
/* seq_file iterator operations shared by all /proc sysvipc files. */
static struct seq_operations sysvipc_proc_seqops = {
	.start = sysvipc_proc_start,
	.stop  = sysvipc_proc_stop,
	.next  = sysvipc_proc_next,
	.show  = sysvipc_proc_show,
};
/*
 * Open handler: allocate the per-open iterator, bind it to the proc
 * entry's iface and pin the opener's ipc namespace (released in
 * sysvipc_proc_release()).
 */
static int sysvipc_proc_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *seq;
	struct ipc_proc_iter *iter;

	ret = -ENOMEM;
	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		goto out;

	ret = seq_open(file, &sysvipc_proc_seqops);
	if (ret)
		goto out_kfree;

	/* seq_release_private() will kfree iter for us on release. */
	seq = file->private_data;
	seq->private = iter;

	iter->iface = PDE(inode)->data;
	iter->ns    = get_ipc_ns(current->nsproxy->ipc_ns);
out:
	return ret;
out_kfree:
	kfree(iter);
	goto out;
}
  887. static int sysvipc_proc_release(struct inode *inode, struct file *file)
  888. {
  889. struct seq_file *seq = file->private_data;
  890. struct ipc_proc_iter *iter = seq->private;
  891. put_ipc_ns(iter->ns);
  892. return seq_release_private(inode, file);
  893. }
/* File operations for the /proc sysvipc files (forward-declared above). */
static const struct file_operations sysvipc_proc_fops = {
	.open    = sysvipc_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = sysvipc_proc_release,
};
#endif /* CONFIG_PROC_FS */