/*
 * linux/ipc/msg.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Removed all the remaining kerneld mess
 * Catch the -EFAULT stuff properly
 * Use GFP_KERNEL for messages as in 1.2
 * Fixed up the unchecked user space derefs
 * Copyright (C) 1998 Alan Cox & Andi Kleen
 *
 * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * mostly rewritten, threaded and wake-one semantics added
 * MSGMAX limit removed, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/msg.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>

#include <asm/current.h>
#include <asm/uaccess.h>
#include "util.h"

/*
 * one msg_receiver structure for each sleeping receiver:
 */
struct msg_receiver {
        struct list_head        r_list;
        struct task_struct      *r_tsk;

        int                     r_mode;
        long                    r_msgtype;
        long                    r_maxsize;

        struct msg_msg          *volatile r_msg;
};

/* one msg_sender for each sleeping sender */
struct msg_sender {
        struct list_head        list;
        struct task_struct      *tsk;
};

#define SEARCH_ANY              1
#define SEARCH_EQUAL            2
#define SEARCH_NOTEQUAL         3
#define SEARCH_LESSEQUAL        4

static atomic_t msg_bytes = ATOMIC_INIT(0);
static atomic_t msg_hdrs = ATOMIC_INIT(0);

static struct ipc_ids init_msg_ids;

#define msg_ids(ns)     (*((ns)->ids[IPC_MSG_IDS]))

#define msg_unlock(msq)         ipc_unlock(&(msq)->q_perm)
#define msg_buildid(ns, id, seq) \
        ipc_buildid(&msg_ids(ns), id, seq)

static void freeque(struct ipc_namespace *, struct msg_queue *);
static int newque(struct ipc_namespace *, struct ipc_params *);
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
#endif

static void __msg_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids)
{
        ns->ids[IPC_MSG_IDS] = ids;
        ns->msg_ctlmax = MSGMAX;
        ns->msg_ctlmnb = MSGMNB;
        ns->msg_ctlmni = MSGMNI;
        ipc_init_ids(ids);
}

int msg_init_ns(struct ipc_namespace *ns)
{
        struct ipc_ids *ids;

        ids = kmalloc(sizeof(struct ipc_ids), GFP_KERNEL);
        if (ids == NULL)
                return -ENOMEM;

        __msg_init_ns(ns, ids);
        return 0;
}

void msg_exit_ns(struct ipc_namespace *ns)
{
        struct msg_queue *msq;
        int next_id;
        int total, in_use;

        down_write(&msg_ids(ns).rw_mutex);

        in_use = msg_ids(ns).in_use;

        for (total = 0, next_id = 0; total < in_use; next_id++) {
                msq = idr_find(&msg_ids(ns).ipcs_idr, next_id);
                if (msq == NULL)
                        continue;
                ipc_lock_by_ptr(&msq->q_perm);
                freeque(ns, msq);
                total++;
        }

        up_write(&msg_ids(ns).rw_mutex);

        kfree(ns->ids[IPC_MSG_IDS]);
        ns->ids[IPC_MSG_IDS] = NULL;
}

void __init msg_init(void)
{
        __msg_init_ns(&init_ipc_ns, &init_msg_ids);
        ipc_init_proc_interface("sysvipc/msg",
                                "       key      msqid perms      cbytes       qnum lspid lrpid   uid   gid  cuid  cgid      stime      rtime      ctime\n",
                                IPC_MSG_IDS, sysvipc_msg_proc_show);
}

/*
 * This routine is called in the paths where the rw_mutex is held to protect
 * access to the idr tree.
 */
static inline struct msg_queue *msg_lock_check_down(struct ipc_namespace *ns,
                                                int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock_check_down(&msg_ids(ns), id);

        return container_of(ipcp, struct msg_queue, q_perm);
}

/*
 * msg_lock_(check_) routines are called in the paths where the rw_mutex
 * is not held.
 */
static inline struct msg_queue *msg_lock(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock(&msg_ids(ns), id);

        return container_of(ipcp, struct msg_queue, q_perm);
}

static inline struct msg_queue *msg_lock_check(struct ipc_namespace *ns,
                                                int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock_check(&msg_ids(ns), id);

        return container_of(ipcp, struct msg_queue, q_perm);
}

static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
{
        ipc_rmid(&msg_ids(ns), &s->q_perm);
}

/**
 * newque - Create a new msg queue
 * @ns: namespace
 * @params: ptr to the structure that contains the key and msgflg
 *
 * Called with msg_ids.rw_mutex held (writer)
 */
static int newque(struct ipc_namespace *ns, struct ipc_params *params)
{
        struct msg_queue *msq;
        int id, retval;
        key_t key = params->key;
        int msgflg = params->flg;

        msq = ipc_rcu_alloc(sizeof(*msq));
        if (!msq)
                return -ENOMEM;

        msq->q_perm.mode = msgflg & S_IRWXUGO;
        msq->q_perm.key = key;

        msq->q_perm.security = NULL;
        retval = security_msg_queue_alloc(msq);
        if (retval) {
                ipc_rcu_putref(msq);
                return retval;
        }

        /*
         * ipc_addid() locks msq
         */
        id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
        if (id == -1) {
                security_msg_queue_free(msq);
                ipc_rcu_putref(msq);
                return -ENOSPC;
        }

        msq->q_perm.id = msg_buildid(ns, id, msq->q_perm.seq);
        msq->q_stime = msq->q_rtime = 0;
        msq->q_ctime = get_seconds();
        msq->q_cbytes = msq->q_qnum = 0;
        msq->q_qbytes = ns->msg_ctlmnb;
        msq->q_lspid = msq->q_lrpid = 0;
        INIT_LIST_HEAD(&msq->q_messages);
        INIT_LIST_HEAD(&msq->q_receivers);
        INIT_LIST_HEAD(&msq->q_senders);

        msg_unlock(msq);

        return msq->q_perm.id;
}
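
/*
 * Note on the returned id (illustrative, assuming the ipc_buildid() of this
 * era computes seq * SEQ_MULTIPLIER + id with SEQ_MULTIPLIER == IPCMNI,
 * i.e. 32768): the IDR slot index is combined with the slot's sequence
 * counter, so reusing slot 0 with seq == 5 would yield the user-visible
 * queue id 5 * 32768 + 0 == 163840.  This is why queue ids do not repeat
 * quickly even though slots do.
 */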

static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
{
        mss->tsk = current;
        current->state = TASK_INTERRUPTIBLE;
        list_add_tail(&mss->list, &msq->q_senders);
}

static inline void ss_del(struct msg_sender *mss)
{
        if (mss->list.next != NULL)
                list_del(&mss->list);
}

static void ss_wakeup(struct list_head *h, int kill)
{
        struct list_head *tmp;

        tmp = h->next;
        while (tmp != h) {
                struct msg_sender *mss;

                mss = list_entry(tmp, struct msg_sender, list);
                tmp = tmp->next;
                if (kill)
                        mss->list.next = NULL;
                wake_up_process(mss->tsk);
        }
}
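
/*
 * Note: ss_wakeup(..., 1) is the teardown variant used by freeque().  The
 * kill flag makes it set list.next to NULL before waking each sender, and
 * ss_del() checks for that NULL so a sender woken during queue destruction
 * does not call list_del() against a list that is about to be freed.
 */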

static void expunge_all(struct msg_queue *msq, int res)
{
        struct list_head *tmp;

        tmp = msq->q_receivers.next;
        while (tmp != &msq->q_receivers) {
                struct msg_receiver *msr;

                msr = list_entry(tmp, struct msg_receiver, r_list);
                tmp = tmp->next;
                msr->r_msg = NULL;
                wake_up_process(msr->r_tsk);
                smp_mb();
                msr->r_msg = ERR_PTR(res);
        }
}
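
/*
 * The NULL / wake_up_process() / smp_mb() / ERR_PTR() sequence above pairs
 * with the lockless receive path in do_msgrcv(): a receiver that reads
 * r_msg == NULL knows a wakeup is in flight and spins (cpu_relax()) until
 * the waker publishes the final value behind the barrier.
 */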

/*
 * freeque() wakes up waiters on the sender and receiver waiting queue,
 * removes the message queue from message queue ID IDR, and cleans up all the
 * messages associated with this queue.
 *
 * msg_ids.rw_mutex (writer) and the spinlock for this message queue are held
 * before freeque() is called. msg_ids.rw_mutex remains locked on exit.
 */
static void freeque(struct ipc_namespace *ns, struct msg_queue *msq)
{
        struct list_head *tmp;

        expunge_all(msq, -EIDRM);
        ss_wakeup(&msq->q_senders, 1);
        msg_rmid(ns, msq);
        msg_unlock(msq);

        tmp = msq->q_messages.next;
        while (tmp != &msq->q_messages) {
                struct msg_msg *msg = list_entry(tmp, struct msg_msg, m_list);

                tmp = tmp->next;
                atomic_dec(&msg_hdrs);
                free_msg(msg);
        }
        atomic_sub(msq->q_cbytes, &msg_bytes);
        security_msg_queue_free(msq);
        ipc_rcu_putref(msq);
}

/*
 * Called with msg_ids.rw_mutex and ipcp locked.
 */
static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
{
        struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

        return security_msg_queue_associate(msq, msgflg);
}

asmlinkage long sys_msgget(key_t key, int msgflg)
{
        struct ipc_namespace *ns;
        struct ipc_ops msg_ops;
        struct ipc_params msg_params;

        ns = current->nsproxy->ipc_ns;

        msg_ops.getnew = newque;
        msg_ops.associate = msg_security;
        msg_ops.more_checks = NULL;

        msg_params.key = key;
        msg_params.flg = msgflg;

        return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
}
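
/*
 * Illustrative user-space sketch of the entry point above (not part of the
 * kernel source): create a queue for a key, failing if one already exists.
 *
 *      #include <sys/msg.h>
 *
 *      int qid = msgget(0x1234, IPC_CREAT | IPC_EXCL | 0600);
 *      if (qid < 0)
 *              perror("msgget");       // EEXIST if the key is taken
 *
 * IPC_CREAT | IPC_EXCL reaches newque() through ipcget(); without IPC_EXCL
 * an existing queue is returned and msg_security() vets the association.
 */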

static inline unsigned long
copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
        switch(version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
        {
                struct msqid_ds out;

                memset(&out, 0, sizeof(out));

                ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);

                out.msg_stime = in->msg_stime;
                out.msg_rtime = in->msg_rtime;
                out.msg_ctime = in->msg_ctime;

                if (in->msg_cbytes > USHRT_MAX)
                        out.msg_cbytes = USHRT_MAX;
                else
                        out.msg_cbytes = in->msg_cbytes;
                out.msg_lcbytes = in->msg_cbytes;

                if (in->msg_qnum > USHRT_MAX)
                        out.msg_qnum = USHRT_MAX;
                else
                        out.msg_qnum = in->msg_qnum;

                if (in->msg_qbytes > USHRT_MAX)
                        out.msg_qbytes = USHRT_MAX;
                else
                        out.msg_qbytes = in->msg_qbytes;
                out.msg_lqbytes = in->msg_qbytes;

                out.msg_lspid = in->msg_lspid;
                out.msg_lrpid = in->msg_lrpid;

                return copy_to_user(buf, &out, sizeof(out));
        }
        default:
                return -EINVAL;
        }
}
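
/*
 * Example of the clamping above: with 100000 bytes on a queue, an IPC_OLD
 * caller sees msg_cbytes == USHRT_MAX (65535), since the legacy field is
 * only an unsigned short, while the full count is still reported in the
 * long msg_lcbytes field.
 */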

struct msq_setbuf {
        unsigned long   qbytes;
        uid_t           uid;
        gid_t           gid;
        mode_t          mode;
};

static inline unsigned long
copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version)
{
        switch(version) {
        case IPC_64:
        {
                struct msqid64_ds tbuf;

                if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
                        return -EFAULT;

                out->qbytes = tbuf.msg_qbytes;
                out->uid = tbuf.msg_perm.uid;
                out->gid = tbuf.msg_perm.gid;
                out->mode = tbuf.msg_perm.mode;

                return 0;
        }
        case IPC_OLD:
        {
                struct msqid_ds tbuf_old;

                if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
                        return -EFAULT;

                out->uid = tbuf_old.msg_perm.uid;
                out->gid = tbuf_old.msg_perm.gid;
                out->mode = tbuf_old.msg_perm.mode;

                if (tbuf_old.msg_qbytes == 0)
                        out->qbytes = tbuf_old.msg_lqbytes;
                else
                        out->qbytes = tbuf_old.msg_qbytes;

                return 0;
        }
        default:
                return -EINVAL;
        }
}

asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
{
        struct kern_ipc_perm *ipcp;
        struct msq_setbuf uninitialized_var(setbuf);
        struct msg_queue *msq;
        int err, version;
        struct ipc_namespace *ns;

        if (msqid < 0 || cmd < 0)
                return -EINVAL;

        version = ipc_parse_version(&cmd);
        ns = current->nsproxy->ipc_ns;

        switch (cmd) {
        case IPC_INFO:
        case MSG_INFO:
        {
                struct msginfo msginfo;
                int max_id;

                if (!buf)
                        return -EFAULT;
                /*
                 * We must not return kernel stack data.
                 * Due to padding, it is not enough to set
                 * all member fields.
                 */
                err = security_msg_queue_msgctl(NULL, cmd);
                if (err)
                        return err;

                memset(&msginfo, 0, sizeof(msginfo));
                msginfo.msgmni = ns->msg_ctlmni;
                msginfo.msgmax = ns->msg_ctlmax;
                msginfo.msgmnb = ns->msg_ctlmnb;
                msginfo.msgssz = MSGSSZ;
                msginfo.msgseg = MSGSEG;
                down_read(&msg_ids(ns).rw_mutex);
                if (cmd == MSG_INFO) {
                        msginfo.msgpool = msg_ids(ns).in_use;
                        msginfo.msgmap = atomic_read(&msg_hdrs);
                        msginfo.msgtql = atomic_read(&msg_bytes);
                } else {
                        msginfo.msgmap = MSGMAP;
                        msginfo.msgpool = MSGPOOL;
                        msginfo.msgtql = MSGTQL;
                }
                max_id = ipc_get_maxid(&msg_ids(ns));
                up_read(&msg_ids(ns).rw_mutex);
                if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
                        return -EFAULT;
                return (max_id < 0) ? 0 : max_id;
        }
        case MSG_STAT:  /* msqid is an index rather than a msg queue id */
        case IPC_STAT:
        {
                struct msqid64_ds tbuf;
                int success_return;

                if (!buf)
                        return -EFAULT;

                if (cmd == MSG_STAT) {
                        msq = msg_lock(ns, msqid);
                        if (IS_ERR(msq))
                                return PTR_ERR(msq);
                        success_return = msq->q_perm.id;
                } else {
                        msq = msg_lock_check(ns, msqid);
                        if (IS_ERR(msq))
                                return PTR_ERR(msq);
                        success_return = 0;
                }
                err = -EACCES;
                if (ipcperms(&msq->q_perm, S_IRUGO))
                        goto out_unlock;

                err = security_msg_queue_msgctl(msq, cmd);
                if (err)
                        goto out_unlock;

                memset(&tbuf, 0, sizeof(tbuf));

                kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
                tbuf.msg_stime = msq->q_stime;
                tbuf.msg_rtime = msq->q_rtime;
                tbuf.msg_ctime = msq->q_ctime;
                tbuf.msg_cbytes = msq->q_cbytes;
                tbuf.msg_qnum = msq->q_qnum;
                tbuf.msg_qbytes = msq->q_qbytes;
                tbuf.msg_lspid = msq->q_lspid;
                tbuf.msg_lrpid = msq->q_lrpid;
                msg_unlock(msq);
                if (copy_msqid_to_user(buf, &tbuf, version))
                        return -EFAULT;
                return success_return;
        }
        case IPC_SET:
                if (!buf)
                        return -EFAULT;
                if (copy_msqid_from_user(&setbuf, buf, version))
                        return -EFAULT;
                break;
        case IPC_RMID:
                break;
        default:
                return -EINVAL;
        }

        down_write(&msg_ids(ns).rw_mutex);
        msq = msg_lock_check_down(ns, msqid);
        if (IS_ERR(msq)) {
                err = PTR_ERR(msq);
                goto out_up;
        }

        ipcp = &msq->q_perm;

        err = audit_ipc_obj(ipcp);
        if (err)
                goto out_unlock_up;

        if (cmd == IPC_SET) {
                err = audit_ipc_set_perm(setbuf.qbytes, setbuf.uid, setbuf.gid,
                                         setbuf.mode);
                if (err)
                        goto out_unlock_up;
        }

        err = -EPERM;
        if (current->euid != ipcp->cuid &&
            current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN))
                /* We _could_ check for CAP_CHOWN above, but we don't */
                goto out_unlock_up;

        err = security_msg_queue_msgctl(msq, cmd);
        if (err)
                goto out_unlock_up;

        switch (cmd) {
        case IPC_SET:
        {
                err = -EPERM;
                if (setbuf.qbytes > ns->msg_ctlmnb && !capable(CAP_SYS_RESOURCE))
                        goto out_unlock_up;

                msq->q_qbytes = setbuf.qbytes;

                ipcp->uid = setbuf.uid;
                ipcp->gid = setbuf.gid;
                ipcp->mode = (ipcp->mode & ~S_IRWXUGO) |
                             (S_IRWXUGO & setbuf.mode);
                msq->q_ctime = get_seconds();
                /* sleeping receivers might be excluded by
                 * stricter permissions.
                 */
                expunge_all(msq, -EAGAIN);
                /* sleeping senders might be able to send
                 * due to a larger queue size.
                 */
                ss_wakeup(&msq->q_senders, 0);
                msg_unlock(msq);
                break;
        }
        case IPC_RMID:
                freeque(ns, msq);
                break;
        }
        err = 0;
out_up:
        up_write(&msg_ids(ns).rw_mutex);
        return err;
out_unlock_up:
        msg_unlock(msq);
        goto out_up;
out_unlock:
        msg_unlock(msq);
        return err;
}
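
/*
 * Illustrative user-space use of sys_msgctl() (not part of the kernel
 * source): stat a queue, then lower its byte limit.
 *
 *      struct msqid_ds ds;
 *
 *      if (msgctl(qid, IPC_STAT, &ds) == 0) {
 *              ds.msg_qbytes = 4096;           // raising past MSGMNB needs
 *              msgctl(qid, IPC_SET, &ds);      // CAP_SYS_RESOURCE
 *      }
 *
 * IPC_SET only honours the uid/gid/mode/qbytes fields extracted by
 * copy_msqid_from_user(); the rest of the struct is ignored.
 */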

static int testmsg(struct msg_msg *msg, long type, int mode)
{
        switch(mode)
        {
                case SEARCH_ANY:
                        return 1;
                case SEARCH_LESSEQUAL:
                        if (msg->m_type <= type)
                                return 1;
                        break;
                case SEARCH_EQUAL:
                        if (msg->m_type == type)
                                return 1;
                        break;
                case SEARCH_NOTEQUAL:
                        if (msg->m_type != type)
                                return 1;
                        break;
        }
        return 0;
}

static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
{
        struct list_head *tmp;

        tmp = msq->q_receivers.next;
        while (tmp != &msq->q_receivers) {
                struct msg_receiver *msr;

                msr = list_entry(tmp, struct msg_receiver, r_list);
                tmp = tmp->next;
                if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
                    !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
                                               msr->r_msgtype, msr->r_mode)) {

                        list_del(&msr->r_list);
                        if (msr->r_maxsize < msg->m_ts) {
                                msr->r_msg = NULL;
                                wake_up_process(msr->r_tsk);
                                smp_mb();
                                msr->r_msg = ERR_PTR(-E2BIG);
                        } else {
                                msr->r_msg = NULL;
                                msq->q_lrpid = task_pid_vnr(msr->r_tsk);
                                msq->q_rtime = get_seconds();
                                wake_up_process(msr->r_tsk);
                                smp_mb();
                                msr->r_msg = msg;

                                return 1;
                        }
                }
        }
        return 0;
}

long do_msgsnd(int msqid, long mtype, void __user *mtext,
                size_t msgsz, int msgflg)
{
        struct msg_queue *msq;
        struct msg_msg *msg;
        int err;
        struct ipc_namespace *ns;

        ns = current->nsproxy->ipc_ns;

        if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
                return -EINVAL;
        if (mtype < 1)
                return -EINVAL;

        msg = load_msg(mtext, msgsz);
        if (IS_ERR(msg))
                return PTR_ERR(msg);

        msg->m_type = mtype;
        msg->m_ts = msgsz;

        msq = msg_lock_check(ns, msqid);
        if (IS_ERR(msq)) {
                err = PTR_ERR(msq);
                goto out_free;
        }

        for (;;) {
                struct msg_sender s;

                err = -EACCES;
                if (ipcperms(&msq->q_perm, S_IWUGO))
                        goto out_unlock_free;

                err = security_msg_queue_msgsnd(msq, msg, msgflg);
                if (err)
                        goto out_unlock_free;

                if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
                                1 + msq->q_qnum <= msq->q_qbytes) {
                        break;
                }

                /* queue full, wait: */
                if (msgflg & IPC_NOWAIT) {
                        err = -EAGAIN;
                        goto out_unlock_free;
                }
                ss_add(msq, &s);
                ipc_rcu_getref(msq);
                msg_unlock(msq);
                schedule();

                ipc_lock_by_ptr(&msq->q_perm);
                ipc_rcu_putref(msq);
                if (msq->q_perm.deleted) {
                        err = -EIDRM;
                        goto out_unlock_free;
                }
                ss_del(&s);

                if (signal_pending(current)) {
                        err = -ERESTARTNOHAND;
                        goto out_unlock_free;
                }
        }

        msq->q_lspid = task_tgid_vnr(current);
        msq->q_stime = get_seconds();

        if (!pipelined_send(msq, msg)) {
                /* no one is waiting for this message, enqueue it */
                list_add_tail(&msg->m_list, &msq->q_messages);
                msq->q_cbytes += msgsz;
                msq->q_qnum++;
                atomic_add(msgsz, &msg_bytes);
                atomic_inc(&msg_hdrs);
        }

        err = 0;
        msg = NULL;

out_unlock_free:
        msg_unlock(msq);
out_free:
        if (msg != NULL)
                free_msg(msg);
        return err;
}

asmlinkage long
sys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg)
{
        long mtype;

        if (get_user(mtype, &msgp->mtype))
                return -EFAULT;
        return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
}
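
/*
 * Illustrative user-space counterpart of sys_msgsnd() (not part of the
 * kernel source): send one typed message without blocking when the queue
 * is at its q_qbytes limit.
 *
 *      struct { long mtype; char mtext[64]; } m = { 1, "hello" };
 *
 *      if (msgsnd(qid, &m, sizeof(m.mtext), IPC_NOWAIT) < 0)
 *              perror("msgsnd");       // EAGAIN when the queue is full
 *
 * Only the mtype header is read with get_user() above; the mtext payload
 * (msgsz bytes) is copied by load_msg() and counted against the limits.
 */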

static inline int convert_mode(long *msgtyp, int msgflg)
{
        /*
         * find message of correct type.
         * msgtyp = 0 => get first.
         * msgtyp > 0 => get first message of matching type.
         * msgtyp < 0 => get message with the lowest type that is
         *               <= abs(msgtyp).
         */
        if (*msgtyp == 0)
                return SEARCH_ANY;
        if (*msgtyp < 0) {
                *msgtyp = -*msgtyp;
                return SEARCH_LESSEQUAL;
        }
        if (msgflg & MSG_EXCEPT)
                return SEARCH_NOTEQUAL;
        return SEARCH_EQUAL;
}
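
/*
 * Worked example: with messages of types 3, 2 and 1 queued in that order,
 * msgtyp == -2 becomes SEARCH_LESSEQUAL with a bound of 2.  The scan in
 * do_msgrcv() first remembers the type-2 message and lowers the bound to
 * m_type - 1 == 1, then finds the type-1 message and takes it instead, so
 * the lowest type wins rather than the oldest message.
 */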

long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
                size_t msgsz, long msgtyp, int msgflg)
{
        struct msg_queue *msq;
        struct msg_msg *msg;
        int mode;
        struct ipc_namespace *ns;

        if (msqid < 0 || (long) msgsz < 0)
                return -EINVAL;
        mode = convert_mode(&msgtyp, msgflg);
        ns = current->nsproxy->ipc_ns;

        msq = msg_lock_check(ns, msqid);
        if (IS_ERR(msq))
                return PTR_ERR(msq);

        for (;;) {
                struct msg_receiver msr_d;
                struct list_head *tmp;

                msg = ERR_PTR(-EACCES);
                if (ipcperms(&msq->q_perm, S_IRUGO))
                        goto out_unlock;

                msg = ERR_PTR(-EAGAIN);
                tmp = msq->q_messages.next;
                while (tmp != &msq->q_messages) {
                        struct msg_msg *walk_msg;

                        walk_msg = list_entry(tmp, struct msg_msg, m_list);
                        if (testmsg(walk_msg, msgtyp, mode) &&
                            !security_msg_queue_msgrcv(msq, walk_msg, current,
                                                       msgtyp, mode)) {

                                msg = walk_msg;
                                if (mode == SEARCH_LESSEQUAL &&
                                                walk_msg->m_type != 1) {
                                        msg = walk_msg;
                                        msgtyp = walk_msg->m_type - 1;
                                } else {
                                        msg = walk_msg;
                                        break;
                                }
                        }
                        tmp = tmp->next;
                }
                if (!IS_ERR(msg)) {
                        /*
                         * Found a suitable message.
                         * Unlink it from the queue.
                         */
                        if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
                                msg = ERR_PTR(-E2BIG);
                                goto out_unlock;
                        }
                        list_del(&msg->m_list);
                        msq->q_qnum--;
                        msq->q_rtime = get_seconds();
                        msq->q_lrpid = task_tgid_vnr(current);
                        msq->q_cbytes -= msg->m_ts;
                        atomic_sub(msg->m_ts, &msg_bytes);
                        atomic_dec(&msg_hdrs);
                        ss_wakeup(&msq->q_senders, 0);
                        msg_unlock(msq);
                        break;
                }
                /* No message waiting. Wait for a message */
                if (msgflg & IPC_NOWAIT) {
                        msg = ERR_PTR(-ENOMSG);
                        goto out_unlock;
                }
                list_add_tail(&msr_d.r_list, &msq->q_receivers);
                msr_d.r_tsk = current;
                msr_d.r_msgtype = msgtyp;
                msr_d.r_mode = mode;
                if (msgflg & MSG_NOERROR)
                        msr_d.r_maxsize = INT_MAX;
                else
                        msr_d.r_maxsize = msgsz;
                msr_d.r_msg = ERR_PTR(-EAGAIN);
                current->state = TASK_INTERRUPTIBLE;
                msg_unlock(msq);

                schedule();

                /* Lockless receive, part 1:
                 * Disable preemption.  We don't hold a reference to the
                 * queue, and getting a reference would defeat the idea of
                 * a lockless operation, thus the code relies on rcu to
                 * guarantee the existence of msq:
                 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
                 * Thus if r_msg is -EAGAIN, then the queue is not yet
                 * destroyed.
                 * rcu_read_lock() prevents preemption between reading r_msg
                 * and the spin_lock() inside ipc_lock_by_ptr().
                 */
                rcu_read_lock();

                /* Lockless receive, part 2:
                 * Wait until pipelined_send or expunge_all are outside of
                 * wake_up_process(). There is a race with exit(), see
                 * ipc/mqueue.c for the details.
                 */
                msg = (struct msg_msg *)msr_d.r_msg;
                while (msg == NULL) {
                        cpu_relax();
                        msg = (struct msg_msg *)msr_d.r_msg;
                }

                /* Lockless receive, part 3:
                 * If there is a message or an error then accept it without
                 * locking.
                 */
                if (msg != ERR_PTR(-EAGAIN)) {
                        rcu_read_unlock();
                        break;
                }

                /* Lockless receive, part 4:
                 * Acquire the queue spinlock.
                 */
                ipc_lock_by_ptr(&msq->q_perm);
                rcu_read_unlock();

                /* Lockless receive, part 5:
                 * Repeat test after acquiring the spinlock.
                 */
                msg = (struct msg_msg *)msr_d.r_msg;
                if (msg != ERR_PTR(-EAGAIN))
                        goto out_unlock;

                list_del(&msr_d.r_list);
                if (signal_pending(current)) {
                        msg = ERR_PTR(-ERESTARTNOHAND);
out_unlock:
                        msg_unlock(msq);
                        break;
                }
        }
        if (IS_ERR(msg))
                return PTR_ERR(msg);

        msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
        *pmtype = msg->m_type;
        if (store_msg(mtext, msg, msgsz))
                msgsz = -EFAULT;

        free_msg(msg);

        return msgsz;
}

asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
                           long msgtyp, int msgflg)
{
        long err, mtype;

        err = do_msgrcv(msqid, &mtype, msgp->mtext, msgsz, msgtyp, msgflg);
        if (err < 0)
                goto out;

        if (put_user(mtype, &msgp->mtype))
                err = -EFAULT;
out:
        return err;
}
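
/*
 * Illustrative user-space counterpart of sys_msgrcv() (not part of the
 * kernel source): fetch the lowest-typed message with type <= 2 and
 * truncate rather than fail with E2BIG if it exceeds the buffer.
 *
 *      struct { long mtype; char mtext[64]; } m;
 *      ssize_t n;
 *
 *      n = msgrcv(qid, &m, sizeof(m.mtext), -2, MSG_NOERROR);
 *      if (n < 0)
 *              perror("msgrcv");       // ENOMSG with IPC_NOWAIT, no match
 *
 * On success n is the number of mtext bytes stored by store_msg(), and
 * m.mtype is filled in by the put_user() in sys_msgrcv().
 */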

#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
{
        struct msg_queue *msq = it;

        return seq_printf(s,
                        "%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
                        msq->q_perm.key,
                        msq->q_perm.id,
                        msq->q_perm.mode,
                        msq->q_cbytes,
                        msq->q_qnum,
                        msq->q_lspid,
                        msq->q_lrpid,
                        msq->q_perm.uid,
                        msq->q_perm.gid,
                        msq->q_perm.cuid,
                        msq->q_perm.cgid,
                        msq->q_stime,
                        msq->q_rtime,
                        msq->q_ctime);
}
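
/*
 * Each line printed above describes one queue, with the columns
 * key, msqid, perms (octal), cbytes, qnum, lspid, lrpid, uid, gid, cuid,
 * cgid, stime, rtime and ctime, matching the header string installed by
 * msg_init().
 */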
#endif