mqueue.c
/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak  (golbi@mat.uni.torun.pl)
 *                          Michal Wronski        (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas         (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul        (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson         (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>

#include <net/sock.h>
#include "util.h"

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_PENDING	1
#define STATE_READY	2

struct posix_msg_tree_node {
	struct rb_node		rb_node;
	struct list_head	msg_list;
	int			priority;
};

struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct rb_root msg_tree;
	struct posix_msg_tree_node *node_cache;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	struct user_namespace *notify_user_ns;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);
	return ns;
}

/* Auxiliary functions to manipulate messages' list */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;

	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

		if (likely(leaf->priority == msg->m_type))
			goto insert_msg;
		else if (msg->m_type < leaf->priority)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	if (info->node_cache) {
		leaf = info->node_cache;
		info->node_cache = NULL;
	} else {
		leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
		if (!leaf)
			return -ENOMEM;
		rb_init_node(&leaf->rb_node);
		INIT_LIST_HEAD(&leaf->msg_list);
		info->qsize += sizeof(*leaf);
	}
	leaf->priority = msg->m_type;
	rb_link_node(&leaf->rb_node, parent, p);
	rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
	info->attr.mq_curmsgs++;
	info->qsize += msg->m_ts;
	list_add_tail(&msg->m_list, &leaf->msg_list);
	return 0;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;
	struct msg_msg *msg;

try_again:
	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		/*
		 * During insert, low priorities go to the left and high to
		 * the right.  On receive, we want the highest priorities
		 * first, so walk all the way to the right.
		 */
		p = &(*p)->rb_right;
	}
	if (!parent) {
		if (info->attr.mq_curmsgs) {
			pr_warn_once("Inconsistency in POSIX message queue, "
				     "no tree element, but supposedly messages "
				     "should exist!\n");
			info->attr.mq_curmsgs = 0;
		}
		return NULL;
	}
	leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
	if (unlikely(list_empty(&leaf->msg_list))) {
		pr_warn_once("Inconsistency in POSIX message queue, "
			     "empty leaf node but we haven't implemented "
			     "lazy leaf delete!\n");
		rb_erase(&leaf->rb_node, &info->msg_tree);
		if (info->node_cache) {
			info->qsize -= sizeof(*leaf);
			kfree(leaf);
		} else {
			info->node_cache = leaf;
		}
		goto try_again;
	} else {
		msg = list_first_entry(&leaf->msg_list,
				       struct msg_msg, m_list);
		list_del(&msg->m_list);
		if (list_empty(&leaf->msg_list)) {
			rb_erase(&leaf->rb_node, &info->msg_tree);
			if (info->node_cache) {
				info->qsize -= sizeof(*leaf);
				kfree(leaf);
			} else {
				info->node_cache = leaf;
			}
		}
	}
	info->attr.mq_curmsgs--;
	info->qsize -= msg->m_ts;
	return msg;
}
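
/*
 * Illustrative example (values hypothetical, not from the original source):
 * the rbtree is keyed by priority and each tree node carries a FIFO list of
 * messages at that priority, so msg_get() returns the highest priority
 * first and FIFO order within a priority.  If a queue receives sends with
 * priorities 1, 5, 3, 5 (in that order), successive msg_get() calls yield:
 *
 *	5 (the first one sent), 5 (the second one sent), 3, 1
 *
 * because msg_get() walks to the rightmost (highest priority) leaf and
 * pops the head of that leaf's msg_list.
 */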
static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, umode_t mode,
		struct mq_attr *attr)
{
	struct user_struct *u = current_user();
	struct inode *inode;
	int ret = -ENOMEM;

	inode = new_inode(sb);
	if (!inode)
		goto err;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mtime = inode->i_ctime = inode->i_atime = CURRENT_TIME;

	if (S_ISREG(mode)) {
		struct mqueue_inode_info *info;
		unsigned long mq_bytes, mq_treesize;

		inode->i_fop = &mqueue_file_operations;
		inode->i_size = FILENT_SIZE;
		/* mqueue specific info */
		info = MQUEUE_I(inode);
		spin_lock_init(&info->lock);
		init_waitqueue_head(&info->wait_q);
		INIT_LIST_HEAD(&info->e_wait_q[0].list);
		INIT_LIST_HEAD(&info->e_wait_q[1].list);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
		info->qsize = 0;
		info->user = NULL;	/* set when all is ok */
		info->msg_tree = RB_ROOT;
		info->node_cache = NULL;
		memset(&info->attr, 0, sizeof(info->attr));
		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					   ipc_ns->mq_msg_default);
		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					    ipc_ns->mq_msgsize_default);
		if (attr) {
			info->attr.mq_maxmsg = attr->mq_maxmsg;
			info->attr.mq_msgsize = attr->mq_msgsize;
		}
		/*
		 * We used to allocate a static array of pointers and account
		 * the size of that array as well as one msg_msg struct per
		 * possible message into the queue size.  That's no longer
		 * accurate as the queue is now an rbtree and will grow and
		 * shrink depending on usage patterns.  We can, however, still
		 * account one msg_msg struct per message, but the nodes are
		 * allocated depending on priority usage, and most programs
		 * only use one, or a handful, of priorities.  However, since
		 * this is pinned memory, we need to assume worst case, so
		 * that means the min(mq_maxmsg, max_priorities) * struct
		 * posix_msg_tree_node.
		 */
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);

		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
					  info->attr.mq_msgsize);
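
		/*
		 * Worked example (hypothetical values, struct sizes are
		 * approximate): with mq_maxmsg = 10 and mq_msgsize = 8192,
		 * and both struct msg_msg and struct posix_msg_tree_node
		 * around 48 bytes on a 64-bit build:
		 *
		 *	mq_treesize = 10 * 48 + min(10, MQ_PRIO_MAX) * 48
		 *		    = 960 bytes
		 *	mq_bytes    = 960 + 10 * 8192 = 82880 bytes
		 *
		 * i.e. roughly 81 KB charged against RLIMIT_MSGQUEUE below.
		 */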
		spin_lock(&mq_lock);
		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
			spin_unlock(&mq_lock);
			/* mqueue_evict_inode() releases info->messages */
			ret = -EMFILE;
			goto out_inode;
		}
		u->mq_bytes += mq_bytes;
		spin_unlock(&mq_lock);

		/* all is ok */
		info->user = get_uid(u);
	} else if (S_ISDIR(mode)) {
		inc_nlink(inode);
		/* Some things misbehave if size == 0 on a directory */
		inode->i_size = 2 * DIRENT_SIZE;
		inode->i_op = &mqueue_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
	}

	return inode;
out_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}

static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct ipc_namespace *ns = data;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}

static struct dentry *mqueue_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name,
			 void *data)
{
	if (!(flags & MS_KERNMOUNT))
		data = current->nsproxy->ipc_ns;
	return mount_ns(fs_type, flags, data, mqueue_fill_super);
}

static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, mqueue_i_callback);
}

static void mqueue_evict_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	unsigned long mq_bytes, mq_treesize;
	struct ipc_namespace *ipc_ns;
	struct msg_msg *msg;

	clear_inode(inode);

	if (S_ISDIR(inode->i_mode))
		return;

	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	while ((msg = msg_get(info)) != NULL)
		free_msg(msg);
	kfree(info->node_cache);
	spin_unlock(&info->lock);

	/* Total amount of bytes accounted for the mqueue */
	mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
		min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
		sizeof(struct posix_msg_tree_node);

	mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
				  info->attr.mq_msgsize);

	user = info->user;
	if (user) {
		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
				umode_t mode, bool excl)
{
	struct inode *inode;
	struct mq_attr *attr = dentry->d_fsdata;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}

	if (ipc_ns->mq_queues_count >= HARD_QUEUESMAX ||
	    (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
	     !capable(CAP_SYS_RESOURCE))) {
		error = -ENOSPC;
		goto out_unlock;
	}

	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}
/*
 * This routine handles a read(2) from the queue file.  Rather than doing
 * some sort of mq_receive here, we only allow reading the queue size and
 * notification info: the only values that are interesting from a user's
 * point of view and that aren't accessible through the standard routines.
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
			info->qsize,
			info->notify_owner ? info->notify.sigev_notify : 0,
			(info->notify_owner &&
			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
				info->notify.sigev_signo : 0,
			pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				strlen(buffer));
	if (ret <= 0)
		return ret;

	filp->f_path.dentry->d_inode->i_atime =
		filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME;
	return ret;
}
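
/*
 * Illustrative example (values hypothetical): reading the queue file,
 * e.g. "cat /dev/mqueue/myqueue" on a mounted mqueue filesystem, yields
 * a single line in the format built above, such as:
 *
 *	QSIZE:129        NOTIFY:2     SIGNO:0     NOTIFY_PID:8260
 *
 * QSIZE is info->qsize (pending message bytes plus tree-node overhead),
 * and NOTIFY/SIGNO/NOTIFY_PID describe any registered notification.
 */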
static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	int retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = POLLIN | POLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= POLLOUT | POLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	ewp->task = current;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->static_prio <= current->static_prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts current task to sleep.  Caller must hold queue lock.  After return
 * the lock isn't held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
		    ktime_t *timeout, struct ext_wait_queue *ewp)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_hrtimeout_range_clock(timeout, 0,
			HRTIMER_MODE_ABS, CLOCK_REALTIME);

		while (ewp->state == STATE_PENDING)
			cpu_relax();

		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}

static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * The next function exists only to keep sys_mq_timedsend from growing
 * too long.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/* notification
	 * invoked when there is a registered process, no process is waiting
	 * synchronously for a message AND the state of the queue changed from
	 * empty to not empty.  Here we are sure that no one is waiting
	 * synchronously. */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */
			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			/* map current pid/uid into info->owner's namespaces */
			rcu_read_lock();
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid());
			rcu_read_unlock();

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		put_user_ns(info->notify_user_ns);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
	}
	wake_up(&info->wait_q);
}

static int prepare_timeout(const struct timespec __user *u_abs_timeout,
			   ktime_t *expires, struct timespec *ts)
{
	if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec)))
		return -EFAULT;
	if (!timespec_valid(ts))
		return -EINVAL;

	*expires = timespec_to_ktime(*ts);
	return 0;
}
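
/*
 * Usage sketch (hypothetical userspace code): the timeout is an absolute
 * CLOCK_REALTIME time, so a caller wanting a 5 second relative wait would
 * do roughly:
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_REALTIME, &ts);
 *	ts.tv_sec += 5;
 *	mq_timedsend(mq, buf, len, prio, &ts);
 *
 * A timespec that is already in the past makes a call that would block
 * fail with -ETIMEDOUT immediately.
 */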
static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	put_user_ns(info->notify_user_ns);
	info->notify_owner = NULL;
	info->notify_user_ns = NULL;
}

static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
{
	int mq_treesize;
	unsigned long total_size;

	if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
		return -EINVAL;
	if (capable(CAP_SYS_RESOURCE)) {
		if (attr->mq_maxmsg > HARD_MSGMAX ||
		    attr->mq_msgsize > HARD_MSGSIZEMAX)
			return -EINVAL;
	} else {
		if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
		    attr->mq_msgsize > ipc_ns->mq_msgsize_max)
			return -EINVAL;
	}
	/* check for overflow */
	if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
		return -EOVERFLOW;
	mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) +
		min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) *
		sizeof(struct posix_msg_tree_node);
	total_size = attr->mq_maxmsg * attr->mq_msgsize;
	if (total_size + mq_treesize < total_size)
		return -EOVERFLOW;
	return 0;
}

/*
 * Invoked when creating a new queue via sys_mq_open
 */
static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir,
			struct path *path, int oflag, umode_t mode,
			struct mq_attr *attr)
{
	const struct cred *cred = current_cred();
	int ret;

	if (attr) {
		ret = mq_attr_ok(ipc_ns, attr);
		if (ret)
			return ERR_PTR(ret);
		/* store for use during create */
		path->dentry->d_fsdata = attr;
	} else {
		struct mq_attr def_attr;

		def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					 ipc_ns->mq_msg_default);
		def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					  ipc_ns->mq_msgsize_default);
		ret = mq_attr_ok(ipc_ns, &def_attr);
		if (ret)
			return ERR_PTR(ret);
	}

	mode &= ~current_umask();
	ret = vfs_create(dir, path->dentry, mode, true);
	path->dentry->d_fsdata = NULL;
	if (ret)
		return ERR_PTR(ret);
	return dentry_open(path, oflag, cred);
}

/* Opens existing queue */
static struct file *do_open(struct path *path, int oflag)
{
	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };
	int acc;

	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
		return ERR_PTR(-EINVAL);
	acc = oflag2acc[oflag & O_ACCMODE];
	if (inode_permission(path->dentry->d_inode, acc))
		return ERR_PTR(-EACCES);
	return dentry_open(path, oflag, current_cred());
}

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct path path;
	struct file *filp;
	char *name;
	struct mq_attr attr;
	int fd, error;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;
	struct dentry *root = mnt->mnt_root;
	int ro;

	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	audit_mq_open(oflag, mode, u_attr ? &attr : NULL);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	ro = mnt_want_write(mnt);	/* we'll drop it in any case */
	error = 0;
	mutex_lock(&root->d_inode->i_mutex);
	path.dentry = lookup_one_len(name, root, strlen(name));
	if (IS_ERR(path.dentry)) {
		error = PTR_ERR(path.dentry);
		goto out_putfd;
	}
	path.mnt = mntget(mnt);

	if (oflag & O_CREAT) {
		if (path.dentry->d_inode) {	/* entry already exists */
			audit_inode(name, path.dentry);
			if (oflag & O_EXCL) {
				error = -EEXIST;
				goto out;
			}
			filp = do_open(&path, oflag);
		} else {
			if (ro) {
				error = ro;
				goto out;
			}
			filp = do_create(ipc_ns, root->d_inode,
						&path, oflag, mode,
						u_attr ? &attr : NULL);
		}
	} else {
		if (!path.dentry->d_inode) {
			error = -ENOENT;
			goto out;
		}
		audit_inode(name, path.dentry);
		filp = do_open(&path, oflag);
	}

	if (!IS_ERR(filp))
		fd_install(fd, filp);
	else
		error = PTR_ERR(filp);
out:
	path_put(&path);
out_putfd:
	if (error) {
		put_unused_fd(fd);
		fd = error;
	}
	mutex_unlock(&root->d_inode->i_mutex);
	mnt_drop_write(mnt);
out_putname:
	putname(name);
	return fd;
}
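
/*
 * Userspace usage sketch (hypothetical, glibc wrappers assumed):
 *
 *	struct mq_attr attr = {
 *		.mq_maxmsg  = 10,
 *		.mq_msgsize = 8192,
 *	};
 *	mqd_t mq = mq_open("/myqueue", O_CREAT | O_RDWR, 0600, &attr);
 *	if (mq == (mqd_t) -1)
 *		perror("mq_open");
 *
 * The glibc wrapper passes the name (minus the leading '/') to this
 * syscall; the descriptor returned is an ordinary fd, allocated
 * O_CLOEXEC above.
 */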
SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	char *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	err = mnt_want_write(mnt);
	if (err)
		goto out_name;
	mutex_lock_nested(&mnt->mnt_root->d_inode->i_mutex, I_MUTEX_PARENT);
	dentry = lookup_one_len(name, mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	inode = dentry->d_inode;
	if (!inode) {
		err = -ENOENT;
	} else {
		ihold(inode);
		err = vfs_unlink(dentry->d_parent->d_inode, dentry);
	}
	dput(dentry);

out_unlock:
	mutex_unlock(&mnt->mnt_root->d_inode->i_mutex);
	if (inode)
		iput(inode);
	mnt_drop_write(mnt);
out_name:
	putname(name);

	return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers.  A sender checks that list before adding the
 * new message into the message tree.  If there is a waiting receiver, then
 * it bypasses the message tree and directly hands the message over to the
 * receiver.
 * The receiver accepts the message and returns without grabbing the queue
 * spinlock.  Therefore an intermediate STATE_PENDING state and memory
 * barriers are necessary.  The same algorithm is used for sysv semaphores,
 * see ipc/sem.c for more details.
 *
 * The same algorithm is used for senders.
 */
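
/*
 * Sketch of the lockless handoff (derived from pipelined_send() below and
 * the spin in wq_sleep() above):
 *
 *	sender				receiver (in wq_sleep)
 *	------				----------------------
 *	receiver->msg = message;
 *	list_del(&receiver->list);
 *	receiver->state = STATE_PENDING;
 *	wake_up_process(receiver->task);
 *	smp_wmb();			while (ewp->state == STATE_PENDING)
 *	receiver->state = STATE_READY;		cpu_relax();
 *					sees STATE_READY, returns with
 *					ewp->msg without taking info->lock
 *
 * STATE_PENDING covers the window between the wakeup and the final
 * STATE_READY store, so the woken task never observes a half-published
 * ->msg pointer.
 */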
/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);
	receiver->state = STATE_PENDING;
	wake_up_process(receiver->task);
	smp_wmb();
	receiver->state = STATE_READY;
}

/* pipelined_receive() - if there is a task waiting in sys_mq_timedsend(),
 * take its message and insert it into the queue (we know there is at
 * least one free slot). */
static inline void pipelined_receive(struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	if (msg_insert(sender->msg, info))
		return;
	list_del(&sender->list);
	sender->state = STATE_PENDING;
	wake_up_process(sender->task);
	smp_wmb();
	sender->state = STATE_READY;
}

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	struct file *filp;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	ktime_t expires, *timeout = NULL;
	struct timespec ts;
	struct posix_msg_tree_node *new_leaf = NULL;
	int ret = 0;

	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &expires, &ts);
		if (res)
			return res;
		timeout = &expires;
	}

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, timeout ? &ts : NULL);

	filp = fget(mqdes);
	if (unlikely(!filp)) {
		ret = -EBADF;
		goto out;
	}

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_inode(NULL, filp->f_path.dentry);

	if (unlikely(!(filp->f_mode & FMODE_WRITE))) {
		ret = -EBADF;
		goto out_fput;
	}

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		rb_init_node(&new_leaf->rb_node);
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
		info->qsize += sizeof(*new_leaf);
		new_leaf = NULL;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (filp->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, SEND, timeout, &wait);
			/*
			 * wq_sleep must be called with info->lock held, and
			 * returns with the lock released
			 */
			goto out_free;
		}
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			ret = msg_insert(msg_ptr, info);
			if (ret)
				goto out_unlock;
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;
	}
out_unlock:
	spin_unlock(&info->lock);
out_free:
	if (ret)
		free_msg(msg_ptr);
out_fput:
	fput(filp);
out:
	return ret;
}

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	ktime_t expires, *timeout = NULL;
	struct timespec ts;
	struct posix_msg_tree_node *new_leaf = NULL;

	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &expires, &ts);
		if (res)
			return res;
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, timeout ? &ts : NULL);

	filp = fget(mqdes);
	if (unlikely(!filp)) {
		ret = -EBADF;
		goto out;
	}

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_inode(NULL, filp->f_path.dentry);

	if (unlikely(!(filp->f_mode & FMODE_READ))) {
		ret = -EBADF;
		goto out_fput;
	}

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		rb_init_node(&new_leaf->rb_node);
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
		info->qsize += sizeof(*new_leaf);
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == 0) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;

		/* There is now free space in queue. */
		pipelined_receive(info);

		spin_unlock(&info->lock);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fput(filp);
out:
	return ret;
}

/*
 * Notes: the case when the user wants us to deregister (with NULL as the
 * pointer) and isn't currently the owner of the notification is silently
 * discarded.  POSIX does not explicitly define this behaviour.
 */
SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	int ret;
	struct file *filp;
	struct sock *sock;
	struct inode *inode;
	struct sigevent notification;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	if (u_notification) {
		if (copy_from_user(&notification, u_notification,
					sizeof(struct sigevent)))
			return -EFAULT;
	}

	audit_mq_notify(mqdes, u_notification ? &notification : NULL);

	nc = NULL;
	sock = NULL;
	if (u_notification != NULL) {
		if (unlikely(notification.sigev_notify != SIGEV_NONE &&
			     notification.sigev_notify != SIGEV_SIGNAL &&
			     notification.sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification.sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification.sigev_signo)) {
			return -EINVAL;
		}
		if (notification.sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			if (!nc) {
				ret = -ENOMEM;
				goto out;
			}
			if (copy_from_user(nc->data,
					notification.sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				ret = -EFAULT;
				goto out;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			filp = fget(notification.sigev_signo);
			if (!filp) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(filp);
			fput(filp);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				sock = NULL;
				goto out;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1)
				goto retry;
			if (ret) {
				sock = NULL;
				nc = NULL;
				goto out;
			}
		}
	}

	filp = fget(mqdes);
	if (!filp) {
		ret = -EBADF;
		goto out;
	}

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (u_notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = CURRENT_TIME;
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification.sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification.sigev_signo;
			info->notify.sigev_value = notification.sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		info->notify_user_ns = get_user_ns(current_user_ns());
		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}
	spin_unlock(&info->lock);
out_fput:
	fput(filp);
out:
	if (sock) {
		netlink_detachskb(sock, nc);
	} else if (nc) {
		dev_kfree_skb(nc);
	}
	return ret;
}
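
/*
 * Userspace usage sketch (hypothetical): registering for a signal when
 * the queue goes from empty to non-empty:
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGUSR1,
 *	};
 *	if (mq_notify(mq, &sev) == -1)
 *		perror("mq_notify");
 *
 * Only one process may be registered per queue (-EBUSY otherwise), and a
 * registration is consumed by a single notification, after which it must
 * be re-armed.  For SIGEV_THREAD, the kernel side of the ABI carries a
 * netlink socket fd in sigev_signo (see the fget() above) and delivers
 * the cookie over that socket; the C library's helper thread listens on
 * it and invokes the user's callback.
 */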
SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (u_mqstat != NULL) {
		if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
		if (mqstat.mq_flags & (~O_NONBLOCK))
			return -EINVAL;
	}

	filp = fget(mqdes);
	if (!filp) {
		ret = -EBADF;
		goto out;
	}

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	omqstat = info->attr;
	omqstat.mq_flags = filp->f_flags & O_NONBLOCK;
	if (u_mqstat) {
		audit_mq_getsetattr(mqdes, &mqstat);
		spin_lock(&filp->f_lock);
		if (mqstat.mq_flags & O_NONBLOCK)
			filp->f_flags |= O_NONBLOCK;
		else
			filp->f_flags &= ~O_NONBLOCK;
		spin_unlock(&filp->f_lock);

		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}

	spin_unlock(&info->lock);

	ret = 0;
	if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
						sizeof(struct mq_attr)))
		ret = -EFAULT;

out_fput:
	fput(filp);
out:
	return ret;
}

static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
	.llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.destroy_inode = mqueue_destroy_inode,
	.evict_inode = mqueue_evict_inode,
	.statfs = simple_statfs,
};

static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.mount = mqueue_mount,
	.kill_sb = kill_litter_super,
};

int mq_init_ns(struct ipc_namespace *ns)
{
	ns->mq_queues_count  = 0;
	ns->mq_queues_max    = DFLT_QUEUESMAX;
	ns->mq_msg_max       = DFLT_MSGMAX;
	ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;
	ns->mq_msg_default   = DFLT_MSG;
	ns->mq_msgsize_default = DFLT_MSGSIZE;

	ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
	if (IS_ERR(ns->mq_mnt)) {
		int err = PTR_ERR(ns->mq_mnt);
		ns->mq_mnt = NULL;
		return err;
	}
	return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
	kern_unmount(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	error = mq_init_ns(&init_ipc_ns);
	if (error)
		goto out_filesystem;

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

__initcall(init_mqueue_fs);