/*
 * linux/ipc/util.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Sep 1997 - Call suser() last after "normal" permission checks so we
 *            get BSD style process accounting right.
 *            Occurs in several places in the IPC code.
 *            Chris Evans, <chris@ferret.lmh.ox.ac.uk>
 * Nov 1999 - ipc helper functions, unified SMP locking
 *            Manfred Spraul <manfred@colorfullife.com>
 * Oct 2002 - One lock per IPC id. RCU ipc_free for lock-free grow_ary().
 *            Mingming Cao <cmm@us.ibm.com>
 * Mar 2006 - support for audit of ipc object properties
 *            Dustin Kirkland <dustin.kirkland@us.ibm.com>
 * Jun 2006 - namespaces support
 *            OpenVZ, SWsoft Inc.
 *            Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/msg.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/capability.h>
#include <linux/highuid.h>
#include <linux/security.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/audit.h>
#include <linux/nsproxy.h>
#include <linux/rwsem.h>
#include <linux/memory.h>
#include <linux/ipc_namespace.h>

#include <asm/unistd.h>

#include "util.h"

struct ipc_proc_iface {
        const char *path;
        const char *header;
        int ids;
        int (*show)(struct seq_file *, void *);
};

static void ipc_memory_notifier(struct work_struct *work)
{
        ipcns_notify(IPCNS_MEMCHANGED);
}

static int ipc_memory_callback(struct notifier_block *self,
                                unsigned long action, void *arg)
{
        static DECLARE_WORK(ipc_memory_wq, ipc_memory_notifier);

        switch (action) {
        case MEM_ONLINE:    /* memory successfully brought online */
        case MEM_OFFLINE:   /* or offline: it's time to recompute msgmni */
                /*
                 * This is done by invoking the ipcns notifier chain with the
                 * IPC_MEMCHANGED event.
                 * In order not to keep the lock on the hotplug memory chain
                 * for too long, queue a work item that will, when woken up,
                 * activate the ipcns notification chain.
                 * No need to keep several ipc work items on the queue.
                 */
                if (!work_pending(&ipc_memory_wq))
                        schedule_work(&ipc_memory_wq);
                break;
        case MEM_GOING_ONLINE:
        case MEM_GOING_OFFLINE:
        case MEM_CANCEL_ONLINE:
        case MEM_CANCEL_OFFLINE:
        default:
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block ipc_memory_nb = {
        .notifier_call = ipc_memory_callback,
        .priority = IPC_CALLBACK_PRI,
};

/**
 * ipc_init - initialise IPC subsystem
 *
 * The various System V IPC resources (semaphores, messages and shared
 * memory) are initialised.
 * A callback routine is registered into the memory hotplug notifier
 * chain: since msgmni scales to lowmem this callback routine will be
 * called upon successful memory add / remove to recompute msgmni.
 */
static int __init ipc_init(void)
{
        sem_init();
        msg_init();
        shm_init();
        register_hotmemory_notifier(&ipc_memory_nb);
        register_ipcns_notifier(&init_ipc_ns);
        return 0;
}
__initcall(ipc_init);

/**
 * ipc_init_ids - initialise IPC identifiers
 * @ids: Identifier set
 *
 * Set up the sequence range to use for the ipc identifier range (limited
 * below IPCMNI) then initialise the ids idr.
 */
void ipc_init_ids(struct ipc_ids *ids)
{
        init_rwsem(&ids->rw_mutex);

        ids->in_use = 0;
        ids->seq = 0;
        ids->next_id = -1;
        {
                int seq_limit = INT_MAX/SEQ_MULTIPLIER;
                if (seq_limit > USHRT_MAX)
                        ids->seq_max = USHRT_MAX;
                else
                        ids->seq_max = seq_limit;
        }

        idr_init(&ids->ipcs_idr);
}

#ifdef CONFIG_PROC_FS
static const struct file_operations sysvipc_proc_fops;

/**
 * ipc_init_proc_interface - Create a proc interface for sysvipc types using a seq_file interface.
 * @path: Path in procfs
 * @header: Banner to be printed at the beginning of the file.
 * @ids: ipc id table to iterate.
 * @show: show routine.
 */
void __init ipc_init_proc_interface(const char *path, const char *header,
                int ids, int (*show)(struct seq_file *, void *))
{
        struct proc_dir_entry *pde;
        struct ipc_proc_iface *iface;

        iface = kmalloc(sizeof(*iface), GFP_KERNEL);
        if (!iface)
                return;
        iface->path = path;
        iface->header = header;
        iface->ids = ids;
        iface->show = show;

        pde = proc_create_data(path,
                               S_IRUGO,  /* world readable */
                               NULL,     /* parent dir */
                               &sysvipc_proc_fops,
                               iface);
        if (!pde) {
                kfree(iface);
        }
}
#endif

/**
 * ipc_findkey - find a key in an ipc identifier set
 * @ids: Identifier set
 * @key: The key to find
 *
 * Requires ipc_ids.rw_mutex locked.
 * Returns the LOCKED pointer to the ipc structure if found, or NULL
 * if not. If the key is found, the returned pointer refers to the
 * owning ipc structure.
 */
static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
{
        struct kern_ipc_perm *ipc;
        int next_id;
        int total;

        for (total = 0, next_id = 0; total < ids->in_use; next_id++) {
                ipc = idr_find(&ids->ipcs_idr, next_id);

                if (ipc == NULL)
                        continue;

                if (ipc->key != key) {
                        total++;
                        continue;
                }

                ipc_lock_by_ptr(ipc);
                return ipc;
        }

        return NULL;
}

/**
 * ipc_get_maxid - get the last assigned id
 * @ids: IPC identifier set
 *
 * Called with ipc_ids.rw_mutex held.
 */
int ipc_get_maxid(struct ipc_ids *ids)
{
        struct kern_ipc_perm *ipc;
        int max_id = -1;
        int total, id;

        if (ids->in_use == 0)
                return -1;

        if (ids->in_use == IPCMNI)
                return IPCMNI - 1;

        /* Look for the last assigned id */
        total = 0;
        for (id = 0; id < IPCMNI && total < ids->in_use; id++) {
                ipc = idr_find(&ids->ipcs_idr, id);
                if (ipc != NULL) {
                        max_id = id;
                        total++;
                }
        }
        return max_id;
}

/**
 * ipc_addid - add an IPC identifier
 * @ids: IPC identifier set
 * @new: new IPC permission set
 * @size: limit for the number of used ids
 *
 * Add an entry 'new' to the IPC ids idr. The permissions object is
 * initialised, the first free entry is set up, and the assigned id is
 * returned. The 'new' entry is returned in a locked state on success.
 * On failure the entry is not locked and a negative err-code is returned.
 *
 * Called with ipc_ids.rw_mutex held as a writer.
 */
int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
{
        kuid_t euid;
        kgid_t egid;
        int id;
        int next_id = ids->next_id;

        if (size > IPCMNI)
                size = IPCMNI;

        if (ids->in_use >= size)
                return -ENOSPC;

        idr_preload(GFP_KERNEL);

        spin_lock_init(&new->lock);
        new->deleted = 0;
        rcu_read_lock();
        spin_lock(&new->lock);

        id = idr_alloc(&ids->ipcs_idr, new,
                       (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0,
                       GFP_NOWAIT);
        idr_preload_end();
        if (id < 0) {
                spin_unlock(&new->lock);
                rcu_read_unlock();
                return id;
        }

        ids->in_use++;

        current_euid_egid(&euid, &egid);
        new->cuid = new->uid = euid;
        new->gid = new->cgid = egid;

        if (next_id < 0) {
                new->seq = ids->seq++;
                if (ids->seq > ids->seq_max)
                        ids->seq = 0;
        } else {
                new->seq = ipcid_to_seqx(next_id);
                ids->next_id = -1;
        }

        new->id = ipc_buildid(id, new->seq);
        return id;
}
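
/*
 * Illustrative sketch (added for exposition, not part of the original file):
 * a typical getnew() implementation allocates its object with ipc_rcu_alloc()
 * and then publishes it through ipc_addid(), which returns with the new entry
 * locked. Loosely modelled on newque() in ipc/msg.c; details simplified.
 *
 *        msq = ipc_rcu_alloc(sizeof(*msq));
 *        if (!msq)
 *                return -ENOMEM;
 *        msq->q_perm.mode = msgflg & S_IRWXUGO;
 *        msq->q_perm.key = key;
 *
 *        id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
 *        if (id < 0) {
 *                ipc_rcu_putref(msq);
 *                return id;
 *        }
 *        ...
 *        msg_unlock(msq);
 *        return msq->q_perm.id;
 */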

/**
 * ipcget_new - create a new ipc object
 * @ns: namespace
 * @ids: IPC identifier set
 * @ops: the actual creation routine to call
 * @params: its parameters
 *
 * This routine is called by sys_msgget(), sys_semget() and sys_shmget()
 * when the key is IPC_PRIVATE.
 */
static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
                struct ipc_ops *ops, struct ipc_params *params)
{
        int err;

        down_write(&ids->rw_mutex);
        err = ops->getnew(ns, params);
        up_write(&ids->rw_mutex);
        return err;
}

/**
 * ipc_check_perms - check security and permissions for an IPC
 * @ns: IPC namespace
 * @ipcp: ipc permission set
 * @ops: the actual security routine to call
 * @params: its parameters
 *
 * This routine is called by sys_msgget(), sys_semget() and sys_shmget()
 * when the key is not IPC_PRIVATE and that key already exists in the
 * ids IDR.
 *
 * On success, the IPC id is returned.
 *
 * It is called with ipc_ids.rw_mutex and ipcp->lock held.
 */
static int ipc_check_perms(struct ipc_namespace *ns,
                           struct kern_ipc_perm *ipcp,
                           struct ipc_ops *ops,
                           struct ipc_params *params)
{
        int err;

        if (ipcperms(ns, ipcp, params->flg))
                err = -EACCES;
        else {
                err = ops->associate(ipcp, params->flg);
                if (!err)
                        err = ipcp->id;
        }

        return err;
}

/**
 * ipcget_public - get an ipc object or create a new one
 * @ns: namespace
 * @ids: IPC identifier set
 * @ops: the actual creation routine to call
 * @params: its parameters
 *
 * This routine is called by sys_msgget(), sys_semget() and sys_shmget()
 * when the key is not IPC_PRIVATE.
 * It adds a new entry if the key is not found and does some permission
 * / security checks if the key is found.
 *
 * On success, the ipc id is returned.
 */
static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
                struct ipc_ops *ops, struct ipc_params *params)
{
        struct kern_ipc_perm *ipcp;
        int flg = params->flg;
        int err;

        /*
         * Take the lock as a writer since we are potentially going to add
         * a new entry + read locks are not "upgradable"
         */
        down_write(&ids->rw_mutex);
        ipcp = ipc_findkey(ids, params->key);
        if (ipcp == NULL) {
                /* key not used */
                if (!(flg & IPC_CREAT))
                        err = -ENOENT;
                else
                        err = ops->getnew(ns, params);
        } else {
                /* ipc object has been locked by ipc_findkey() */

                if (flg & IPC_CREAT && flg & IPC_EXCL)
                        err = -EEXIST;
                else {
                        err = 0;
                        if (ops->more_checks)
                                err = ops->more_checks(ipcp, params);
                        if (!err)
                                /*
                                 * ipc_check_perms returns the IPC id on
                                 * success
                                 */
                                err = ipc_check_perms(ns, ipcp, ops, params);
                }
                ipc_unlock(ipcp);
        }
        up_write(&ids->rw_mutex);

        return err;
}

/**
 * ipc_rmid - remove an IPC identifier
 * @ids: IPC identifier set
 * @ipcp: ipc perm structure containing the identifier to remove
 *
 * ipc_ids.rw_mutex (as a writer) and the spinlock for this ID are held
 * before this function is called, and remain locked on the exit.
 */
void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
{
        int lid = ipcid_to_idx(ipcp->id);

        idr_remove(&ids->ipcs_idr, lid);

        ids->in_use--;

        ipcp->deleted = 1;

        return;
}

/**
 * ipc_alloc - allocate ipc space
 * @size: size desired
 *
 * Allocate memory from the appropriate pools and return a pointer to it.
 * NULL is returned if the allocation fails.
 */
void *ipc_alloc(int size)
{
        void *out;

        if (size > PAGE_SIZE)
                out = vmalloc(size);
        else
                out = kmalloc(size, GFP_KERNEL);

        return out;
}

/**
 * ipc_free - free ipc space
 * @ptr: pointer returned by ipc_alloc
 * @size: size of block
 *
 * Free a block created with ipc_alloc(). The caller must know the size
 * used in the allocation call.
 */
void ipc_free(void *ptr, int size)
{
        if (size > PAGE_SIZE)
                vfree(ptr);
        else
                kfree(ptr);
}
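
/*
 * Illustrative usage sketch (added for exposition, not in the original file):
 * callers pair ipc_alloc() with ipc_free() and pass the same size to both,
 * roughly as the semaphore code does for its temporary value array:
 *
 *        ushort *sem_io;
 *
 *        sem_io = ipc_alloc(sizeof(ushort) * nsems);
 *        if (sem_io == NULL)
 *                return -ENOMEM;
 *        ...
 *        ipc_free(sem_io, sizeof(ushort) * nsems);
 */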

/*
 * rcu allocations:
 * There are three headers that are prepended to the actual allocation:
 * - during use: ipc_rcu_hdr.
 * - during the rcu grace period: ipc_rcu_grace.
 * - [only if vmalloc]: ipc_rcu_sched.
 * Their lifetime doesn't overlap, thus the headers share the same memory.
 * Unlike a normal union, they are right-aligned, thus some container_of
 * forward/backward casting is necessary:
 */
struct ipc_rcu_hdr
{
        int refcount;
        int is_vmalloc;
        void *data[0];
};

struct ipc_rcu_grace
{
        struct rcu_head rcu;
        /* "void *" makes sure alignment of following data is sane. */
        void *data[0];
};

struct ipc_rcu_sched
{
        struct work_struct work;
        /* "void *" makes sure alignment of following data is sane. */
        void *data[0];
};

#define HDRLEN_KMALLOC  (sizeof(struct ipc_rcu_grace) > sizeof(struct ipc_rcu_hdr) ? \
                                sizeof(struct ipc_rcu_grace) : sizeof(struct ipc_rcu_hdr))
#define HDRLEN_VMALLOC  (sizeof(struct ipc_rcu_sched) > HDRLEN_KMALLOC ? \
                                sizeof(struct ipc_rcu_sched) : HDRLEN_KMALLOC)

static inline int rcu_use_vmalloc(int size)
{
        /* Too big for a single page? */
        if (HDRLEN_KMALLOC + size > PAGE_SIZE)
                return 1;
        return 0;
}

/**
 * ipc_rcu_alloc - allocate ipc and rcu space
 * @size: size desired
 *
 * Allocate memory for the rcu header structure + the object.
 * Returns the pointer to the object.
 * NULL is returned if the allocation fails.
 */
void *ipc_rcu_alloc(int size)
{
        void *out;

        /*
         * We prepend the allocation with the rcu struct, and
         * workqueue if necessary (for vmalloc).
         */
        if (rcu_use_vmalloc(size)) {
                out = vmalloc(HDRLEN_VMALLOC + size);
                if (out) {
                        out += HDRLEN_VMALLOC;
                        container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
                        container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
                }
        } else {
                out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
                if (out) {
                        out += HDRLEN_KMALLOC;
                        container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
                        container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
                }
        }

        return out;
}

void ipc_rcu_getref(void *ptr)
{
        container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
}

static void ipc_do_vfree(struct work_struct *work)
{
        vfree(container_of(work, struct ipc_rcu_sched, work));
}

/**
 * ipc_schedule_free - free ipc + rcu space
 * @head: RCU callback structure for queued work
 *
 * Since the RCU callback is run from softirq (bh) context, vfree() cannot
 * be called directly; defer it to a workqueue via schedule_work().
 */
static void ipc_schedule_free(struct rcu_head *head)
{
        struct ipc_rcu_grace *grace;
        struct ipc_rcu_sched *sched;

        grace = container_of(head, struct ipc_rcu_grace, rcu);
        sched = container_of(&(grace->data[0]), struct ipc_rcu_sched,
                                data[0]);

        INIT_WORK(&sched->work, ipc_do_vfree);
        schedule_work(&sched->work);
}

void ipc_rcu_putref(void *ptr)
{
        if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
                return;

        if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) {
                call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu,
                                ipc_schedule_free);
        } else {
                kfree_rcu(container_of(ptr, struct ipc_rcu_grace, data), rcu);
        }
}
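
/*
 * Illustrative lifetime sketch (added for exposition, not in the original
 * file): an object starts with refcount 1 from ipc_rcu_alloc(); a caller that
 * must drop the ipc lock temporarily takes an extra reference so the object
 * cannot be freed underneath it, then drops that reference again. The final
 * ipc_rcu_putref() frees the memory only after an RCU grace period.
 *
 *        struct sem_array *sma = ipc_rcu_alloc(sizeof(*sma));
 *        if (!sma)
 *                return ERR_PTR(-ENOMEM);
 *        ...
 *        ipc_rcu_getref(sma);    (pin across an unlocked section)
 *        ...
 *        ipc_rcu_putref(sma);    (unpin)
 *        ...
 *        ipc_rcu_putref(sma);    (last reference: schedule the RCU free)
 */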

/**
 * ipcperms - check IPC permissions
 * @ns: IPC namespace
 * @ipcp: IPC permission set
 * @flag: desired permission set
 *
 * Check user, group, other permissions for access
 * to ipc resources. Return 0 if allowed.
 *
 * @flag will most probably be 0 or S_...UGO from <linux/stat.h>
 */
int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
{
        kuid_t euid = current_euid();
        int requested_mode, granted_mode;

        audit_ipc_obj(ipcp);
        requested_mode = (flag >> 6) | (flag >> 3) | flag;
        granted_mode = ipcp->mode;
        if (uid_eq(euid, ipcp->cuid) ||
            uid_eq(euid, ipcp->uid))
                granted_mode >>= 6;
        else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
                granted_mode >>= 3;
        /* is there some bit set in requested_mode but not in granted_mode? */
        if ((requested_mode & ~granted_mode & 0007) &&
            !ns_capable(ns->user_ns, CAP_IPC_OWNER))
                return -1;

        return security_ipc_permission(ipcp, flag);
}
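
/*
 * Illustrative usage sketch (added for exposition, not in the original file):
 * callers pass the access they want in S_I...UGO form and treat a non-zero
 * return as -EACCES, e.g. before writing to a message queue:
 *
 *        if (ipcperms(ns, &msq->q_perm, S_IWUGO)) {
 *                err = -EACCES;
 *                goto out_unlock;
 *        }
 */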

/*
 * Functions to convert between the kern_ipc_perm structure and the
 * old/new ipc_perm structures
 */

/**
 * kernel_to_ipc64_perm - convert kernel ipc permissions to user
 * @in: kernel permissions
 * @out: new style IPC permissions
 *
 * Turn the kernel object @in into a set of permissions descriptions
 * for returning to userspace (@out).
 */
void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out)
{
        out->key = in->key;
        out->uid = from_kuid_munged(current_user_ns(), in->uid);
        out->gid = from_kgid_munged(current_user_ns(), in->gid);
        out->cuid = from_kuid_munged(current_user_ns(), in->cuid);
        out->cgid = from_kgid_munged(current_user_ns(), in->cgid);
        out->mode = in->mode;
        out->seq = in->seq;
}

/**
 * ipc64_perm_to_ipc_perm - convert new ipc permissions to old
 * @in: new style IPC permissions
 * @out: old style IPC permissions
 *
 * Turn the new style permissions object @in into a compatibility
 * object and store it into the @out pointer.
 */
void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out)
{
        out->key = in->key;
        SET_UID(out->uid, in->uid);
        SET_GID(out->gid, in->gid);
        SET_UID(out->cuid, in->cuid);
        SET_GID(out->cgid, in->cgid);
        out->mode = in->mode;
        out->seq = in->seq;
}

/**
 * ipc_obtain_object
 * @ids: ipc identifier set
 * @id: ipc id to look for
 *
 * Look for an id in the ipc ids idr and return the associated ipc object.
 *
 * Call inside the RCU critical section.
 * The ipc object is *not* locked on exit.
 */
struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id)
{
        struct kern_ipc_perm *out;
        int lid = ipcid_to_idx(id);

        out = idr_find(&ids->ipcs_idr, lid);
        if (!out)
                return ERR_PTR(-EINVAL);

        return out;
}

/**
 * ipc_lock - lock an ipc structure without rw_mutex held
 * @ids: IPC identifier set
 * @id: ipc id to look for
 *
 * Look for an id in the ipc ids idr and lock the associated ipc object.
 *
 * The ipc object is locked on successful exit.
 */
struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
{
        struct kern_ipc_perm *out;

        rcu_read_lock();
        out = ipc_obtain_object(ids, id);
        if (IS_ERR(out))
                goto err1;

        spin_lock(&out->lock);

        /*
         * ipc_rmid() may have already freed the ID while ipc_lock()
         * was spinning: here verify that the structure is still valid.
         */
        if (!out->deleted)
                return out;

        spin_unlock(&out->lock);
        out = ERR_PTR(-EINVAL);
err1:
        rcu_read_unlock();
        return out;
}

/**
 * ipc_obtain_object_check
 * @ids: ipc identifier set
 * @id: ipc id to look for
 *
 * Similar to ipc_obtain_object() but also checks that the sequence number
 * encoded in @id still matches the object (via ipc_checkid()).
 *
 * Call inside the RCU critical section.
 * The ipc object is *not* locked on exit.
 */
struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id)
{
        struct kern_ipc_perm *out = ipc_obtain_object(ids, id);

        if (IS_ERR(out))
                goto out;

        if (ipc_checkid(out, id))
                return ERR_PTR(-EIDRM);
out:
        return out;
}

struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id)
{
        struct kern_ipc_perm *out;

        out = ipc_lock(ids, id);
        if (IS_ERR(out))
                return out;

        if (ipc_checkid(out, id)) {
                ipc_unlock(out);
                return ERR_PTR(-EIDRM);
        }

        return out;
}
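
/*
 * Illustrative wrapper sketch (added for exposition, not in the original
 * file): the per-resource code typically wraps ipc_lock_check() to convert
 * the generic kern_ipc_perm pointer into its own type, along the lines of
 * msg_lock_check() in ipc/msg.c:
 *
 *        static inline struct msg_queue *msg_lock_check(struct ipc_namespace *ns,
 *                                                       int id)
 *        {
 *                struct kern_ipc_perm *ipcp = ipc_lock_check(&msg_ids(ns), id);
 *
 *                if (IS_ERR(ipcp))
 *                        return (struct msg_queue *)ipcp;
 *
 *                return container_of(ipcp, struct msg_queue, q_perm);
 *        }
 */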

/**
 * ipcget - Common sys_*get() code
 * @ns: namespace
 * @ids: IPC identifier set
 * @ops: operations to be called on ipc object creation, permission checks
 *       and further checks
 * @params: the parameters needed by the previous operations.
 *
 * Common routine called by sys_msgget(), sys_semget() and sys_shmget().
 */
int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
                        struct ipc_ops *ops, struct ipc_params *params)
{
        if (params->key == IPC_PRIVATE)
                return ipcget_new(ns, ids, ops, params);
        else
                return ipcget_public(ns, ids, ops, params);
}
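
/*
 * Illustrative caller sketch (added for exposition, not in the original
 * file): a sys_*get() entry point fills in an ipc_ops/ipc_params pair and
 * hands them to ipcget(), roughly as sys_msgget() does:
 *
 *        SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
 *        {
 *                struct ipc_namespace *ns = current->nsproxy->ipc_ns;
 *                struct ipc_ops msg_ops;
 *                struct ipc_params msg_params;
 *
 *                msg_ops.getnew = newque;
 *                msg_ops.associate = msg_security;
 *                msg_ops.more_checks = NULL;
 *
 *                msg_params.key = key;
 *                msg_params.flg = msgflg;
 *
 *                return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
 *        }
 */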

/**
 * ipc_update_perm - update the permissions of an IPC.
 * @in: the permission given as input.
 * @out: the permission of the ipc to set.
 */
int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
{
        kuid_t uid = make_kuid(current_user_ns(), in->uid);
        kgid_t gid = make_kgid(current_user_ns(), in->gid);

        if (!uid_valid(uid) || !gid_valid(gid))
                return -EINVAL;

        out->uid = uid;
        out->gid = gid;
        out->mode = (out->mode & ~S_IRWXUGO)
                | (in->mode & S_IRWXUGO);
        return 0;
}

/**
 * ipcctl_pre_down - retrieve an ipc and check permissions for some IPC_XXX cmd
 * @ns: the ipc namespace
 * @ids: the table of ids where to look for the ipc
 * @id: the id of the ipc to retrieve
 * @cmd: the cmd to check
 * @perm: the permission to set
 * @extra_perm: one extra permission parameter used by msq
 *
 * This function does some common audit and permissions check for some IPC_XXX
 * cmd and is called from semctl_down, shmctl_down and msgctl_down.
 * It must be called without any lock held and
 *  - retrieves the ipc with the given id in the given table.
 *  - performs some audit and permission check, depending on the given cmd
 *  - returns the ipc with both ipc and rw_mutex locks held in case of success
 *    or an err-code without any lock held otherwise.
 */
struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns,
                                      struct ipc_ids *ids, int id, int cmd,
                                      struct ipc64_perm *perm, int extra_perm)
{
        struct kern_ipc_perm *ipcp;
        kuid_t euid;
        int err;

        down_write(&ids->rw_mutex);
        ipcp = ipc_lock_check(ids, id);
        if (IS_ERR(ipcp)) {
                err = PTR_ERR(ipcp);
                goto out_up;
        }

        audit_ipc_obj(ipcp);
        if (cmd == IPC_SET)
                audit_ipc_set_perm(extra_perm, perm->uid,
                                   perm->gid, perm->mode);

        euid = current_euid();
        if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid) ||
            ns_capable(ns->user_ns, CAP_SYS_ADMIN))
                return ipcp;

        err = -EPERM;
        ipc_unlock(ipcp);
out_up:
        up_write(&ids->rw_mutex);
        return ERR_PTR(err);
}

#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION

/**
 * ipc_parse_version - IPC call version
 * @cmd: pointer to command
 *
 * Return IPC_64 for new style IPC and IPC_OLD for old style IPC.
 * The @cmd value, which encodes both the command and the version, is
 * reduced to just the command code.
 */
int ipc_parse_version(int *cmd)
{
        if (*cmd & IPC_64) {
                *cmd ^= IPC_64;
                return IPC_64;
        } else {
                return IPC_OLD;
        }
}

#endif /* CONFIG_ARCH_WANT_IPC_PARSE_VERSION */
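
/*
 * Illustrative usage sketch (added for exposition, not in the original file):
 * the *ctl() syscalls call ipc_parse_version() first, so that @cmd is reduced
 * to the bare command and the version decides which user structure layout to
 * copy, e.g.:
 *
 *        int version = ipc_parse_version(&cmd);
 *
 *        switch (cmd) {
 *        case IPC_STAT:
 *                ... copy out an ipc64_perm or ipc_perm depending on version ...
 *        }
 */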

#ifdef CONFIG_PROC_FS
struct ipc_proc_iter {
        struct ipc_namespace *ns;
        struct ipc_proc_iface *iface;
};

/*
 * This routine locks the ipc structure found at least at position pos.
 */
static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
                                              loff_t *new_pos)
{
        struct kern_ipc_perm *ipc;
        int total, id;

        total = 0;
        for (id = 0; id < pos && total < ids->in_use; id++) {
                ipc = idr_find(&ids->ipcs_idr, id);
                if (ipc != NULL)
                        total++;
        }

        if (total >= ids->in_use)
                return NULL;

        for ( ; pos < IPCMNI; pos++) {
                ipc = idr_find(&ids->ipcs_idr, pos);
                if (ipc != NULL) {
                        *new_pos = pos + 1;
                        ipc_lock_by_ptr(ipc);
                        return ipc;
                }
        }

        /* Out of range - return NULL to terminate iteration */
        return NULL;
}

static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
{
        struct ipc_proc_iter *iter = s->private;
        struct ipc_proc_iface *iface = iter->iface;
        struct kern_ipc_perm *ipc = it;

        /* If we had an ipc id locked before, unlock it */
        if (ipc && ipc != SEQ_START_TOKEN)
                ipc_unlock(ipc);

        return sysvipc_find_ipc(&iter->ns->ids[iface->ids], *pos, pos);
}

/*
 * File positions: pos 0 -> header, pos n -> ipc id = n - 1.
 * SeqFile iterator: the iterator value is a locked ipc pointer or
 * SEQ_START_TOKEN.
 */
static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
{
        struct ipc_proc_iter *iter = s->private;
        struct ipc_proc_iface *iface = iter->iface;
        struct ipc_ids *ids;

        ids = &iter->ns->ids[iface->ids];

        /*
         * Take the lock - this will be released by the corresponding
         * call to stop().
         */
        down_read(&ids->rw_mutex);

        /* pos < 0 is invalid */
        if (*pos < 0)
                return NULL;

        /* pos == 0 means header */
        if (*pos == 0)
                return SEQ_START_TOKEN;

        /* Find the (pos-1)th ipc */
        return sysvipc_find_ipc(ids, *pos - 1, pos);
}

static void sysvipc_proc_stop(struct seq_file *s, void *it)
{
        struct kern_ipc_perm *ipc = it;
        struct ipc_proc_iter *iter = s->private;
        struct ipc_proc_iface *iface = iter->iface;
        struct ipc_ids *ids;

        /* If we had a locked structure, release it */
        if (ipc && ipc != SEQ_START_TOKEN)
                ipc_unlock(ipc);

        ids = &iter->ns->ids[iface->ids];
        /* Release the lock we took in start() */
        up_read(&ids->rw_mutex);
}

static int sysvipc_proc_show(struct seq_file *s, void *it)
{
        struct ipc_proc_iter *iter = s->private;
        struct ipc_proc_iface *iface = iter->iface;

        if (it == SEQ_START_TOKEN)
                return seq_puts(s, iface->header);

        return iface->show(s, it);
}

static const struct seq_operations sysvipc_proc_seqops = {
        .start = sysvipc_proc_start,
        .stop  = sysvipc_proc_stop,
        .next  = sysvipc_proc_next,
        .show  = sysvipc_proc_show,
};

static int sysvipc_proc_open(struct inode *inode, struct file *file)
{
        int ret;
        struct seq_file *seq;
        struct ipc_proc_iter *iter;

        ret = -ENOMEM;
        iter = kmalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                goto out;

        ret = seq_open(file, &sysvipc_proc_seqops);
        if (ret)
                goto out_kfree;

        seq = file->private_data;
        seq->private = iter;

        iter->iface = PDE(inode)->data;
        iter->ns = get_ipc_ns(current->nsproxy->ipc_ns);
out:
        return ret;
out_kfree:
        kfree(iter);
        goto out;
}

static int sysvipc_proc_release(struct inode *inode, struct file *file)
{
        struct seq_file *seq = file->private_data;
        struct ipc_proc_iter *iter = seq->private;
        put_ipc_ns(iter->ns);
        return seq_release_private(inode, file);
}

static const struct file_operations sysvipc_proc_fops = {
        .open    = sysvipc_proc_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = sysvipc_proc_release,
};
#endif /* CONFIG_PROC_FS */