mqueue.c

/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak  (golbi@mat.uni.torun.pl)
 *                          Michal Wronski        (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas         (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul        (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson         (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>

#include <net/sock.h>
#include "util.h"

#define MQUEUE_MAGIC    0x19800202
#define DIRENT_SIZE     20
#define FILENT_SIZE     80

#define SEND            0
#define RECV            1

#define STATE_NONE      0
#define STATE_PENDING   1
#define STATE_READY     2
struct posix_msg_tree_node {
        struct rb_node          rb_node;
        struct list_head        msg_list;
        int                     priority;
};

struct ext_wait_queue {         /* queue of sleeping tasks */
        struct task_struct *task;
        struct list_head list;
        struct msg_msg *msg;    /* ptr of loaded message */
        int state;              /* one of STATE_* values */
};

struct mqueue_inode_info {
        spinlock_t lock;
        struct inode vfs_inode;
        wait_queue_head_t wait_q;

        struct rb_root msg_tree;
        struct posix_msg_tree_node *node_cache;
        struct mq_attr attr;

        struct sigevent notify;
        struct pid *notify_owner;
        struct user_namespace *notify_user_ns;
        struct user_struct *user;       /* user who created, for accounting */
        struct sock *notify_sock;
        struct sk_buff *notify_cookie;

        /* for tasks waiting for free space and messages, respectively */
        struct ext_wait_queue e_wait_q[2];

        unsigned long qsize;    /* size of queue in memory (sum of all msgs) */
};
static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
        return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
        return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
        struct ipc_namespace *ns;

        spin_lock(&mq_lock);
        ns = __get_ns_from_inode(inode);
        spin_unlock(&mq_lock);
        return ns;
}
/* Auxiliary functions to manipulate messages' list */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
        struct rb_node **p, *parent = NULL;
        struct posix_msg_tree_node *leaf;

        p = &info->msg_tree.rb_node;
        while (*p) {
                parent = *p;
                leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

                if (likely(leaf->priority == msg->m_type))
                        goto insert_msg;
                else if (msg->m_type < leaf->priority)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }
        if (info->node_cache) {
                leaf = info->node_cache;
                info->node_cache = NULL;
        } else {
                leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
                if (!leaf)
                        return -ENOMEM;
                rb_init_node(&leaf->rb_node);
                INIT_LIST_HEAD(&leaf->msg_list);
                info->qsize += sizeof(*leaf);
        }
        leaf->priority = msg->m_type;
        rb_link_node(&leaf->rb_node, parent, p);
        rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
        info->attr.mq_curmsgs++;
        info->qsize += msg->m_ts;
        list_add_tail(&msg->m_list, &leaf->msg_list);
        return 0;
}
static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
        struct rb_node **p, *parent = NULL;
        struct posix_msg_tree_node *leaf;
        struct msg_msg *msg;

try_again:
        p = &info->msg_tree.rb_node;
        while (*p) {
                parent = *p;
                /*
                 * During insert, low priorities go to the left and high to the
                 * right.  On receive, we want the highest priorities first, so
                 * walk all the way to the right.
                 */
                p = &(*p)->rb_right;
        }
        if (!parent) {
                if (info->attr.mq_curmsgs) {
                        pr_warn_once("Inconsistency in POSIX message queue, "
                                     "no tree element, but supposedly messages "
                                     "should exist!\n");
                        info->attr.mq_curmsgs = 0;
                }
                return NULL;
        }
        leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
        if (unlikely(list_empty(&leaf->msg_list))) {
                pr_warn_once("Inconsistency in POSIX message queue, "
                             "empty leaf node but we haven't implemented "
                             "lazy leaf delete!\n");
                rb_erase(&leaf->rb_node, &info->msg_tree);
                if (info->node_cache) {
                        info->qsize -= sizeof(*leaf);
                        kfree(leaf);
                } else {
                        info->node_cache = leaf;
                }
                goto try_again;
        } else {
                msg = list_first_entry(&leaf->msg_list,
                                       struct msg_msg, m_list);
                list_del(&msg->m_list);
                if (list_empty(&leaf->msg_list)) {
                        rb_erase(&leaf->rb_node, &info->msg_tree);
                        if (info->node_cache) {
                                info->qsize -= sizeof(*leaf);
                                kfree(leaf);
                        } else {
                                info->node_cache = leaf;
                        }
                }
        }
        info->attr.mq_curmsgs--;
        info->qsize -= msg->m_ts;
        return msg;
}
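
/*
 * Illustrative sketch (editor's note, not part of the original source):
 * msg_tree keys one posix_msg_tree_node per priority in use, and each
 * node holds a FIFO list of messages at that priority.  With priorities
 * 0, 5 and 31 in use:
 *
 *                     [prio 5]
 *                    /        \
 *             [prio 0]      [prio 31]
 *                |              |
 *          msg -> msg       msg -> msg -> msg
 *
 * msg_insert() walks left/right on priority; msg_get() always walks to
 * the rightmost node, so the highest priority is dequeued first, FIFO
 * within each priority.
 */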
static struct inode *mqueue_get_inode(struct super_block *sb,
                struct ipc_namespace *ipc_ns, umode_t mode,
                struct mq_attr *attr)
{
        struct user_struct *u = current_user();
        struct inode *inode;
        int ret = -ENOMEM;

        inode = new_inode(sb);
        if (!inode)
                goto err;

        inode->i_ino = get_next_ino();
        inode->i_mode = mode;
        inode->i_uid = current_fsuid();
        inode->i_gid = current_fsgid();
        inode->i_mtime = inode->i_ctime = inode->i_atime = CURRENT_TIME;

        if (S_ISREG(mode)) {
                struct mqueue_inode_info *info;
                unsigned long mq_bytes, mq_treesize;

                inode->i_fop = &mqueue_file_operations;
                inode->i_size = FILENT_SIZE;
                /* mqueue specific info */
                info = MQUEUE_I(inode);
                spin_lock_init(&info->lock);
                init_waitqueue_head(&info->wait_q);
                INIT_LIST_HEAD(&info->e_wait_q[0].list);
                INIT_LIST_HEAD(&info->e_wait_q[1].list);
                info->notify_owner = NULL;
                info->notify_user_ns = NULL;
                info->qsize = 0;
                info->user = NULL;      /* set when all is ok */
                info->msg_tree = RB_ROOT;
                info->node_cache = NULL;
                memset(&info->attr, 0, sizeof(info->attr));
                info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
                                           ipc_ns->mq_msg_default);
                info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
                                            ipc_ns->mq_msgsize_default);
                if (attr) {
                        info->attr.mq_maxmsg = attr->mq_maxmsg;
                        info->attr.mq_msgsize = attr->mq_msgsize;
                }
                /*
                 * We used to allocate a static array of pointers and account
                 * the size of that array as well as one msg_msg struct per
                 * possible message into the queue size. That's no longer
                 * accurate as the queue is now an rbtree and will grow and
                 * shrink depending on usage patterns.  We can, however, still
                 * account one msg_msg struct per message, but the nodes are
                 * allocated depending on priority usage, and most programs
                 * only use one, or a handful, of priorities.  However, since
                 * this is pinned memory, we need to assume worst case, so
                 * that means the min(mq_maxmsg, max_priorities) * struct
                 * posix_msg_tree_node.
                 */
                mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
                        min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
                        sizeof(struct posix_msg_tree_node);

                mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
                                          info->attr.mq_msgsize);
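
                /*
                 * Worked example (editor's note; struct sizes are assumed
                 * for illustration and vary by architecture): with
                 * mq_maxmsg = 10, mq_msgsize = 8192,
                 * sizeof(struct msg_msg) == 48 and
                 * sizeof(struct posix_msg_tree_node) == 40:
                 *
                 *   mq_treesize = 10 * 48 + min(10, MQ_PRIO_MAX) * 40
                 *               = 480 + 400 = 880
                 *   mq_bytes    = 880 + 10 * 8192 = 82800
                 *
                 * i.e. roughly 81 KB is charged against RLIMIT_MSGQUEUE
                 * for this queue, whether or not it ever fills up.
                 */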
                spin_lock(&mq_lock);
                if (u->mq_bytes + mq_bytes < u->mq_bytes ||
                    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
                        spin_unlock(&mq_lock);
                        /* mqueue_evict_inode() releases info->messages */
                        ret = -EMFILE;
                        goto out_inode;
                }
                u->mq_bytes += mq_bytes;
                spin_unlock(&mq_lock);

                /* all is ok */
                info->user = get_uid(u);
        } else if (S_ISDIR(mode)) {
                inc_nlink(inode);
                /* Some things misbehave if size == 0 on a directory */
                inode->i_size = 2 * DIRENT_SIZE;
                inode->i_op = &mqueue_dir_inode_operations;
                inode->i_fop = &simple_dir_operations;
        }

        return inode;
out_inode:
        iput(inode);
err:
        return ERR_PTR(ret);
}
static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
        struct inode *inode;
        struct ipc_namespace *ns = data;

        sb->s_blocksize = PAGE_CACHE_SIZE;
        sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
        sb->s_magic = MQUEUE_MAGIC;
        sb->s_op = &mqueue_super_ops;

        inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        sb->s_root = d_make_root(inode);
        if (!sb->s_root)
                return -ENOMEM;
        return 0;
}

static struct dentry *mqueue_mount(struct file_system_type *fs_type,
                         int flags, const char *dev_name,
                         void *data)
{
        if (!(flags & MS_KERNMOUNT))
                data = current->nsproxy->ipc_ns;
        return mount_ns(fs_type, flags, data, mqueue_fill_super);
}

static void init_once(void *foo)
{
        struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

        inode_init_once(&p->vfs_inode);
}
static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
        struct mqueue_inode_info *ei;

        ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
}

static void mqueue_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_destroy_inode(struct inode *inode)
{
        call_rcu(&inode->i_rcu, mqueue_i_callback);
}

static void mqueue_evict_inode(struct inode *inode)
{
        struct mqueue_inode_info *info;
        struct user_struct *user;
        unsigned long mq_bytes, mq_treesize;
        struct ipc_namespace *ipc_ns;
        struct msg_msg *msg;

        clear_inode(inode);

        if (S_ISDIR(inode->i_mode))
                return;

        ipc_ns = get_ns_from_inode(inode);
        info = MQUEUE_I(inode);
        spin_lock(&info->lock);
        while ((msg = msg_get(info)) != NULL)
                free_msg(msg);
        kfree(info->node_cache);
        spin_unlock(&info->lock);

        /* Total amount of bytes accounted for the mqueue */
        mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
                min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
                sizeof(struct posix_msg_tree_node);

        mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
                                  info->attr.mq_msgsize);

        user = info->user;
        if (user) {
                spin_lock(&mq_lock);
                user->mq_bytes -= mq_bytes;
                /*
                 * get_ns_from_inode() ensures that the
                 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
                 * to which we now hold a reference, or it is NULL.
                 * We can't put it here under mq_lock, though.
                 */
                if (ipc_ns)
                        ipc_ns->mq_queues_count--;
                spin_unlock(&mq_lock);
                free_uid(user);
        }
        if (ipc_ns)
                put_ipc_ns(ipc_ns);
}
static int mqueue_create(struct inode *dir, struct dentry *dentry,
                                umode_t mode, bool excl)
{
        struct inode *inode;
        struct mq_attr *attr = dentry->d_fsdata;
        int error;
        struct ipc_namespace *ipc_ns;

        spin_lock(&mq_lock);
        ipc_ns = __get_ns_from_inode(dir);
        if (!ipc_ns) {
                error = -EACCES;
                goto out_unlock;
        }
        if (ipc_ns->mq_queues_count >= HARD_QUEUESMAX ||
            (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
             !capable(CAP_SYS_RESOURCE))) {
                error = -ENOSPC;
                goto out_unlock;
        }
        ipc_ns->mq_queues_count++;
        spin_unlock(&mq_lock);

        inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
        if (IS_ERR(inode)) {
                error = PTR_ERR(inode);
                spin_lock(&mq_lock);
                ipc_ns->mq_queues_count--;
                goto out_unlock;
        }

        put_ipc_ns(ipc_ns);
        dir->i_size += DIRENT_SIZE;
        dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;

        d_instantiate(dentry, inode);
        dget(dentry);
        return 0;
out_unlock:
        spin_unlock(&mq_lock);
        if (ipc_ns)
                put_ipc_ns(ipc_ns);
        return error;
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
        struct inode *inode = dentry->d_inode;

        dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
        dir->i_size -= DIRENT_SIZE;
        drop_nlink(inode);
        dput(dentry);
        return 0;
}
/*
 * This routine handles a read() on a queue file.
 * To avoid reimplementing mq_receive() here, we only expose the queue
 * size and notification info (the only values that are interesting from
 * the user's point of view and aren't accessible through the standard
 * mq_* routines).
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
                                size_t count, loff_t *off)
{
        struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
        char buffer[FILENT_SIZE];
        ssize_t ret;

        spin_lock(&info->lock);
        snprintf(buffer, sizeof(buffer),
                        "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
                        info->qsize,
                        info->notify_owner ? info->notify.sigev_notify : 0,
                        (info->notify_owner &&
                         info->notify.sigev_notify == SIGEV_SIGNAL) ?
                                info->notify.sigev_signo : 0,
                        pid_vnr(info->notify_owner));
        spin_unlock(&info->lock);
        buffer[sizeof(buffer)-1] = '\0';

        ret = simple_read_from_buffer(u_data, count, off, buffer,
                                strlen(buffer));
        if (ret <= 0)
                return ret;

        filp->f_path.dentry->d_inode->i_atime =
                filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME;
        return ret;
}
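
/*
 * Example (editor's note, illustrative): reading a queue file from
 * userspace returns the snapshot formatted above. Assuming the mqueue
 * filesystem is mounted at /dev/mqueue and a queue named "myqueue"
 * exists, the numbers below are made up:
 *
 *   $ cat /dev/mqueue/myqueue
 *   QSIZE:129      NOTIFY:2     SIGNO:0     NOTIFY_PID:8260
 */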
static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
        struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);

        spin_lock(&info->lock);
        if (task_tgid(current) == info->notify_owner)
                remove_notification(info);

        spin_unlock(&info->lock);
        return 0;
}

static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
        struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
        int retval = 0;

        poll_wait(filp, &info->wait_q, poll_tab);

        spin_lock(&info->lock);
        if (info->attr.mq_curmsgs)
                retval = POLLIN | POLLRDNORM;

        if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
                retval |= POLLOUT | POLLWRNORM;
        spin_unlock(&info->lock);

        return retval;
}
/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
                        struct ext_wait_queue *ewp)
{
        struct ext_wait_queue *walk;

        ewp->task = current;

        list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
                if (walk->task->static_prio <= current->static_prio) {
                        list_add_tail(&ewp->list, &walk->list);
                        return;
                }
        }
        list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts current task to sleep. Caller must hold queue lock. After return
 * lock isn't held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
                    ktime_t *timeout, struct ext_wait_queue *ewp)
{
        int retval;
        signed long time;

        wq_add(info, sr, ewp);

        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);

                spin_unlock(&info->lock);
                time = schedule_hrtimeout_range_clock(timeout, 0,
                        HRTIMER_MODE_ABS, CLOCK_REALTIME);

                while (ewp->state == STATE_PENDING)
                        cpu_relax();

                if (ewp->state == STATE_READY) {
                        retval = 0;
                        goto out;
                }
                spin_lock(&info->lock);
                if (ewp->state == STATE_READY) {
                        retval = 0;
                        goto out_unlock;
                }
                if (signal_pending(current)) {
                        retval = -ERESTARTSYS;
                        break;
                }
                if (time == 0) {
                        retval = -ETIMEDOUT;
                        break;
                }
        }
        list_del(&ewp->list);
out_unlock:
        spin_unlock(&info->lock);
out:
        return retval;
}
/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
                struct mqueue_inode_info *info, int sr)
{
        struct list_head *ptr;

        ptr = info->e_wait_q[sr].list.prev;
        if (ptr == &info->e_wait_q[sr].list)
                return NULL;
        return list_entry(ptr, struct ext_wait_queue, list);
}

static inline void set_cookie(struct sk_buff *skb, char code)
{
        ((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}
/*
 * This function exists only to keep sys_mq_timedsend() from growing
 * too long.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
        /* notification
         * invoked when there is a registered process, no process is
         * waiting synchronously for a message AND the state of the queue
         * changed from empty to not empty. Here we are sure that no one
         * is waiting synchronously. */
        if (info->notify_owner &&
            info->attr.mq_curmsgs == 1) {
                struct siginfo sig_i;
                switch (info->notify.sigev_notify) {
                case SIGEV_NONE:
                        break;
                case SIGEV_SIGNAL:
                        /* sends signal */
                        sig_i.si_signo = info->notify.sigev_signo;
                        sig_i.si_errno = 0;
                        sig_i.si_code = SI_MESGQ;
                        sig_i.si_value = info->notify.sigev_value;
                        /* map current pid/uid into info->owner's
                         * namespaces */
                        rcu_read_lock();
                        sig_i.si_pid = task_tgid_nr_ns(current,
                                                ns_of_pid(info->notify_owner));
                        sig_i.si_uid = from_kuid_munged(info->notify_user_ns,
                                                current_uid());
                        rcu_read_unlock();

                        kill_pid_info(info->notify.sigev_signo,
                                      &sig_i, info->notify_owner);
                        break;
                case SIGEV_THREAD:
                        set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
                        netlink_sendskb(info->notify_sock, info->notify_cookie);
                        break;
                }
                /* after notification unregisters process */
                put_pid(info->notify_owner);
                put_user_ns(info->notify_user_ns);
                info->notify_owner = NULL;
                info->notify_user_ns = NULL;
        }
        wake_up(&info->wait_q);
}
static int prepare_timeout(const struct timespec __user *u_abs_timeout,
                           ktime_t *expires, struct timespec *ts)
{
        if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec)))
                return -EFAULT;
        if (!timespec_valid(ts))
                return -EINVAL;

        *expires = timespec_to_ktime(*ts);
        return 0;
}

static void remove_notification(struct mqueue_inode_info *info)
{
        if (info->notify_owner != NULL &&
            info->notify.sigev_notify == SIGEV_THREAD) {
                set_cookie(info->notify_cookie, NOTIFY_REMOVED);
                netlink_sendskb(info->notify_sock, info->notify_cookie);
        }
        put_pid(info->notify_owner);
        put_user_ns(info->notify_user_ns);
        info->notify_owner = NULL;
        info->notify_user_ns = NULL;
}
static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
{
        int mq_treesize;
        unsigned long total_size;

        if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
                return -EINVAL;
        if (capable(CAP_SYS_RESOURCE)) {
                if (attr->mq_maxmsg > HARD_MSGMAX ||
                    attr->mq_msgsize > HARD_MSGSIZEMAX)
                        return -EINVAL;
        } else {
                if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
                    attr->mq_msgsize > ipc_ns->mq_msgsize_max)
                        return -EINVAL;
        }
        /* check for overflow */
        if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
                return -EOVERFLOW;
        mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) +
                min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) *
                sizeof(struct posix_msg_tree_node);
        total_size = attr->mq_maxmsg * attr->mq_msgsize;
        if (total_size + mq_treesize < total_size)
                return -EOVERFLOW;
        return 0;
}
/*
 * Invoked when creating a new queue via sys_mq_open
 */
static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir,
                        struct path *path, int oflag, umode_t mode,
                        struct mq_attr *attr)
{
        const struct cred *cred = current_cred();
        struct file *result;
        int ret;

        if (attr) {
                ret = mq_attr_ok(ipc_ns, attr);
                if (ret)
                        return ERR_PTR(ret);
                /* store for use during create */
                path->dentry->d_fsdata = attr;
        } else {
                struct mq_attr def_attr;

                def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
                                         ipc_ns->mq_msg_default);
                def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
                                          ipc_ns->mq_msgsize_default);
                ret = mq_attr_ok(ipc_ns, &def_attr);
                if (ret)
                        return ERR_PTR(ret);
        }

        mode &= ~current_umask();
        ret = mnt_want_write(path->mnt);
        if (ret)
                return ERR_PTR(ret);
        ret = vfs_create(dir, path->dentry, mode, true);
        path->dentry->d_fsdata = NULL;
        if (!ret)
                result = dentry_open(path, oflag, cred);
        else
                result = ERR_PTR(ret);
        /*
         * dentry_open() took a persistent mnt_want_write(),
         * so we can now drop this one.
         */
        mnt_drop_write(path->mnt);
        return result;
}
/* Opens existing queue */
static struct file *do_open(struct path *path, int oflag)
{
        static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
                                                  MAY_READ | MAY_WRITE };
        int acc;

        if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
                return ERR_PTR(-EINVAL);
        acc = oflag2acc[oflag & O_ACCMODE];
        if (inode_permission(path->dentry->d_inode, acc))
                return ERR_PTR(-EACCES);
        return dentry_open(path, oflag, current_cred());
}
SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
                struct mq_attr __user *, u_attr)
{
        struct path path;
        struct file *filp;
        char *name;
        struct mq_attr attr;
        int fd, error;
        struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
        struct dentry *root = ipc_ns->mq_mnt->mnt_root;

        if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
                return -EFAULT;

        audit_mq_open(oflag, mode, u_attr ? &attr : NULL);

        if (IS_ERR(name = getname(u_name)))
                return PTR_ERR(name);

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0)
                goto out_putname;

        error = 0;
        mutex_lock(&root->d_inode->i_mutex);
        path.dentry = lookup_one_len(name, root, strlen(name));
        if (IS_ERR(path.dentry)) {
                error = PTR_ERR(path.dentry);
                goto out_putfd;
        }
        path.mnt = mntget(ipc_ns->mq_mnt);

        if (oflag & O_CREAT) {
                if (path.dentry->d_inode) {     /* entry already exists */
                        audit_inode(name, path.dentry);
                        if (oflag & O_EXCL) {
                                error = -EEXIST;
                                goto out;
                        }
                        filp = do_open(&path, oflag);
                } else {
                        filp = do_create(ipc_ns, root->d_inode,
                                                &path, oflag, mode,
                                                u_attr ? &attr : NULL);
                }
        } else {
                if (!path.dentry->d_inode) {
                        error = -ENOENT;
                        goto out;
                }
                audit_inode(name, path.dentry);
                filp = do_open(&path, oflag);
        }

        if (!IS_ERR(filp))
                fd_install(fd, filp);
        else
                error = PTR_ERR(filp);
out:
        path_put(&path);
out_putfd:
        if (error) {
                put_unused_fd(fd);
                fd = error;
        }
        mutex_unlock(&root->d_inode->i_mutex);
out_putname:
        putname(name);
        return fd;
}
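
/*
 * Userspace sketch (editor's note, illustrative; queue name and
 * attributes are made up): the glibc mq_open() wrapper ends up in the
 * syscall above.
 *
 *   #include <fcntl.h>
 *   #include <sys/stat.h>
 *   #include <mqueue.h>
 *
 *   struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 128 };
 *   mqd_t mqd = mq_open("/myqueue", O_CREAT | O_RDWR, 0600, &attr);
 *   if (mqd == (mqd_t)-1)
 *           perror("mq_open");
 */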
SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
        int err;
        char *name;
        struct dentry *dentry;
        struct inode *inode = NULL;
        struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;

        name = getname(u_name);
        if (IS_ERR(name))
                return PTR_ERR(name);

        mutex_lock_nested(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex,
                        I_MUTEX_PARENT);
        dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
        if (IS_ERR(dentry)) {
                err = PTR_ERR(dentry);
                goto out_unlock;
        }

        if (!dentry->d_inode) {
                err = -ENOENT;
                goto out_err;
        }

        inode = dentry->d_inode;
        if (inode)
                ihold(inode);
        err = mnt_want_write(ipc_ns->mq_mnt);
        if (err)
                goto out_err;
        err = vfs_unlink(dentry->d_parent->d_inode, dentry);
        mnt_drop_write(ipc_ns->mq_mnt);
out_err:
        dput(dentry);

out_unlock:
        mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
        putname(name);
        if (inode)
                iput(inode);

        return err;
}
/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message tree. If there is a waiting receiver, then it
 * bypasses the message tree and directly hands the message over to the
 * receiver.
 * The receiver accepts the message and returns without grabbing the queue
 * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
 * are necessary. The same algorithm is used for sysv semaphores, see
 * ipc/sem.c for more details.
 *
 * The same algorithm is used for senders.
 */
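
/*
 * Illustrative interleaving of the handshake above (editor's note; the
 * receiver side of a pipelined send is shown, the sender side is
 * symmetric):
 *
 *   receiver: wq_sleep()               sender: pipelined_send()
 *   ------------------------          -------------------------
 *   state = STATE_NONE (caller)
 *   sleeps in schedule_hrtimeout...
 *                                      receiver->msg = message;
 *                                      state = STATE_PENDING;
 *                                      wake_up_process();
 *   wakes, spins while                 smp_wmb();
 *   state == STATE_PENDING             state = STATE_READY;
 *   sees STATE_READY, returns
 *   without taking info->lock
 */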
/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct mqueue_inode_info *info,
                                  struct msg_msg *message,
                                  struct ext_wait_queue *receiver)
{
        receiver->msg = message;
        list_del(&receiver->list);
        receiver->state = STATE_PENDING;
        wake_up_process(receiver->task);
        smp_wmb();
        receiver->state = STATE_READY;
}
/* pipelined_receive() - if there is a task waiting in sys_mq_timedsend(),
 * take its message and insert it into the queue (we know there is at least
 * one free slot). */
static inline void pipelined_receive(struct mqueue_inode_info *info)
{
        struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

        if (!sender) {
                /* for poll */
                wake_up_interruptible(&info->wait_q);
                return;
        }
        if (msg_insert(sender->msg, info))
                return;
        list_del(&sender->list);
        sender->state = STATE_PENDING;
        wake_up_process(sender->task);
        smp_wmb();
        sender->state = STATE_READY;
}
SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
                size_t, msg_len, unsigned int, msg_prio,
                const struct timespec __user *, u_abs_timeout)
{
        struct file *filp;
        struct inode *inode;
        struct ext_wait_queue wait;
        struct ext_wait_queue *receiver;
        struct msg_msg *msg_ptr;
        struct mqueue_inode_info *info;
        ktime_t expires, *timeout = NULL;
        struct timespec ts;
        struct posix_msg_tree_node *new_leaf = NULL;
        int ret = 0;

        if (u_abs_timeout) {
                int res = prepare_timeout(u_abs_timeout, &expires, &ts);
                if (res)
                        return res;
                timeout = &expires;
        }

        if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
                return -EINVAL;

        audit_mq_sendrecv(mqdes, msg_len, msg_prio, timeout ? &ts : NULL);

        filp = fget(mqdes);
        if (unlikely(!filp)) {
                ret = -EBADF;
                goto out;
        }

        inode = filp->f_path.dentry->d_inode;
        if (unlikely(filp->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
        }
        info = MQUEUE_I(inode);
        audit_inode(NULL, filp->f_path.dentry);

        if (unlikely(!(filp->f_mode & FMODE_WRITE))) {
                ret = -EBADF;
                goto out_fput;
        }

        if (unlikely(msg_len > info->attr.mq_msgsize)) {
                ret = -EMSGSIZE;
                goto out_fput;
        }

        /* First try to allocate memory, before doing anything with
         * existing queues. */
        msg_ptr = load_msg(u_msg_ptr, msg_len);
        if (IS_ERR(msg_ptr)) {
                ret = PTR_ERR(msg_ptr);
                goto out_fput;
        }
        msg_ptr->m_ts = msg_len;
        msg_ptr->m_type = msg_prio;

        /*
         * msg_insert really wants us to have a valid, spare node struct so
         * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
         * fall back to that if necessary.
         */
        if (!info->node_cache)
                new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

        spin_lock(&info->lock);

        if (!info->node_cache && new_leaf) {
                /* Save our speculative allocation into the cache */
                rb_init_node(&new_leaf->rb_node);
                INIT_LIST_HEAD(&new_leaf->msg_list);
                info->node_cache = new_leaf;
                info->qsize += sizeof(*new_leaf);
                new_leaf = NULL;
        } else {
                kfree(new_leaf);
        }

        if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
                if (filp->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
                } else {
                        wait.task = current;
                        wait.msg = (void *) msg_ptr;
                        wait.state = STATE_NONE;
                        ret = wq_sleep(info, SEND, timeout, &wait);
                        /*
                         * wq_sleep must be called with info->lock held, and
                         * returns with the lock released
                         */
                        goto out_free;
                }
        } else {
                receiver = wq_get_first_waiter(info, RECV);
                if (receiver) {
                        pipelined_send(info, msg_ptr, receiver);
                } else {
                        /* adds message to the queue */
                        ret = msg_insert(msg_ptr, info);
                        if (ret)
                                goto out_unlock;
                        __do_notify(info);
                }
                inode->i_atime = inode->i_mtime = inode->i_ctime =
                                CURRENT_TIME;
        }
out_unlock:
        spin_unlock(&info->lock);
out_free:
        if (ret)
                free_msg(msg_ptr);
out_fput:
        fput(filp);
out:
        return ret;
}
SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
                size_t, msg_len, unsigned int __user *, u_msg_prio,
                const struct timespec __user *, u_abs_timeout)
{
        ssize_t ret;
        struct msg_msg *msg_ptr;
        struct file *filp;
        struct inode *inode;
        struct mqueue_inode_info *info;
        struct ext_wait_queue wait;
        ktime_t expires, *timeout = NULL;
        struct timespec ts;
        struct posix_msg_tree_node *new_leaf = NULL;

        if (u_abs_timeout) {
                int res = prepare_timeout(u_abs_timeout, &expires, &ts);
                if (res)
                        return res;
                timeout = &expires;
        }

        audit_mq_sendrecv(mqdes, msg_len, 0, timeout ? &ts : NULL);

        filp = fget(mqdes);
        if (unlikely(!filp)) {
                ret = -EBADF;
                goto out;
        }

        inode = filp->f_path.dentry->d_inode;
        if (unlikely(filp->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
        }
        info = MQUEUE_I(inode);
        audit_inode(NULL, filp->f_path.dentry);

        if (unlikely(!(filp->f_mode & FMODE_READ))) {
                ret = -EBADF;
                goto out_fput;
        }

        /* checks if buffer is big enough */
        if (unlikely(msg_len < info->attr.mq_msgsize)) {
                ret = -EMSGSIZE;
                goto out_fput;
        }

        /*
         * msg_insert really wants us to have a valid, spare node struct so
         * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
         * fall back to that if necessary.
         */
        if (!info->node_cache)
                new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

        spin_lock(&info->lock);

        if (!info->node_cache && new_leaf) {
                /* Save our speculative allocation into the cache */
                rb_init_node(&new_leaf->rb_node);
                INIT_LIST_HEAD(&new_leaf->msg_list);
                info->node_cache = new_leaf;
                info->qsize += sizeof(*new_leaf);
        } else {
                kfree(new_leaf);
        }

        if (info->attr.mq_curmsgs == 0) {
                if (filp->f_flags & O_NONBLOCK) {
                        spin_unlock(&info->lock);
                        ret = -EAGAIN;
                } else {
                        wait.task = current;
                        wait.state = STATE_NONE;
                        ret = wq_sleep(info, RECV, timeout, &wait);
                        msg_ptr = wait.msg;
                }
        } else {
                msg_ptr = msg_get(info);

                inode->i_atime = inode->i_mtime = inode->i_ctime =
                                CURRENT_TIME;

                /* There is now free space in queue. */
                pipelined_receive(info);

                spin_unlock(&info->lock);
                ret = 0;
        }
        if (ret == 0) {
                ret = msg_ptr->m_ts;

                if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
                        store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
                        ret = -EFAULT;
                }
                free_msg(msg_ptr);
        }
out_fput:
        fput(filp);
out:
        return ret;
}
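
/*
 * Userspace sketch (editor's note, illustrative; names and sizes are
 * made up, error handling omitted): a send/receive pair exercising the
 * two syscalls above, via the glibc wrappers.
 *
 *   char buf[128];
 *   unsigned int prio;
 *
 *   mq_send(mqd, "hello", 5, 3);               // send at priority 3
 *   mq_receive(mqd, buf, sizeof(buf), &prio);  // buf must be >= mq_msgsize
 *
 * Note the asymmetry checked above: a send fails with EMSGSIZE when the
 * message is larger than mq_msgsize, while a receive fails when the
 * buffer is smaller than mq_msgsize.
 */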
/*
 * Notes: the case when a user asks us to deregister (with a NULL
 * pointer) while not currently being the owner of the notification
 * is silently discarded. POSIX does not explicitly define this case.
 */
SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
                const struct sigevent __user *, u_notification)
{
        int ret;
        struct file *filp;
        struct sock *sock;
        struct inode *inode;
        struct sigevent notification;
        struct mqueue_inode_info *info;
        struct sk_buff *nc;

        if (u_notification) {
                if (copy_from_user(&notification, u_notification,
                                        sizeof(struct sigevent)))
                        return -EFAULT;
        }

        audit_mq_notify(mqdes, u_notification ? &notification : NULL);

        nc = NULL;
        sock = NULL;
        if (u_notification != NULL) {
                if (unlikely(notification.sigev_notify != SIGEV_NONE &&
                             notification.sigev_notify != SIGEV_SIGNAL &&
                             notification.sigev_notify != SIGEV_THREAD))
                        return -EINVAL;
                if (notification.sigev_notify == SIGEV_SIGNAL &&
                        !valid_signal(notification.sigev_signo)) {
                        return -EINVAL;
                }
                if (notification.sigev_notify == SIGEV_THREAD) {
                        long timeo;

                        /* create the notify skb */
                        nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
                        if (!nc) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        if (copy_from_user(nc->data,
                                        notification.sigev_value.sival_ptr,
                                        NOTIFY_COOKIE_LEN)) {
                                ret = -EFAULT;
                                goto out;
                        }

                        /* TODO: add a header? */
                        skb_put(nc, NOTIFY_COOKIE_LEN);
                        /* and attach it to the socket */
retry:
                        filp = fget(notification.sigev_signo);
                        if (!filp) {
                                ret = -EBADF;
                                goto out;
                        }
                        sock = netlink_getsockbyfilp(filp);
                        fput(filp);
                        if (IS_ERR(sock)) {
                                ret = PTR_ERR(sock);
                                sock = NULL;
                                goto out;
                        }

                        timeo = MAX_SCHEDULE_TIMEOUT;
                        ret = netlink_attachskb(sock, nc, &timeo, NULL);
                        if (ret == 1)
                                goto retry;
                        if (ret) {
                                sock = NULL;
                                nc = NULL;
                                goto out;
                        }
                }
        }

        filp = fget(mqdes);
        if (!filp) {
                ret = -EBADF;
                goto out;
        }

        inode = filp->f_path.dentry->d_inode;
        if (unlikely(filp->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
        }
        info = MQUEUE_I(inode);

        ret = 0;
        spin_lock(&info->lock);
        if (u_notification == NULL) {
                if (info->notify_owner == task_tgid(current)) {
                        remove_notification(info);
                        inode->i_atime = inode->i_ctime = CURRENT_TIME;
                }
        } else if (info->notify_owner != NULL) {
                ret = -EBUSY;
        } else {
                switch (notification.sigev_notify) {
                case SIGEV_NONE:
                        info->notify.sigev_notify = SIGEV_NONE;
                        break;
                case SIGEV_THREAD:
                        info->notify_sock = sock;
                        info->notify_cookie = nc;
                        sock = NULL;
                        nc = NULL;
                        info->notify.sigev_notify = SIGEV_THREAD;
                        break;
                case SIGEV_SIGNAL:
                        info->notify.sigev_signo = notification.sigev_signo;
                        info->notify.sigev_value = notification.sigev_value;
                        info->notify.sigev_notify = SIGEV_SIGNAL;
                        break;
                }

                info->notify_owner = get_pid(task_tgid(current));
                info->notify_user_ns = get_user_ns(current_user_ns());
                inode->i_atime = inode->i_ctime = CURRENT_TIME;
        }
        spin_unlock(&info->lock);
out_fput:
        fput(filp);
out:
        if (sock) {
                netlink_detachskb(sock, nc);
        } else if (nc) {
                dev_kfree_skb(nc);
        }
        return ret;
}
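
/*
 * Userspace sketch (editor's note, illustrative; the signal choice is
 * arbitrary): registering for SIGEV_SIGNAL notification via the
 * syscall above.
 *
 *   struct sigevent sev = {
 *           .sigev_notify = SIGEV_SIGNAL,
 *           .sigev_signo  = SIGUSR1,
 *   };
 *   if (mq_notify(mqd, &sev) == -1)
 *           perror("mq_notify");   // fails with EBUSY if another
 *                                  // process already owns it
 *
 * Passing NULL deregisters, but only if the caller is the current
 * notify_owner (see the notes above).
 */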
SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
                const struct mq_attr __user *, u_mqstat,
                struct mq_attr __user *, u_omqstat)
{
        int ret;
        struct mq_attr mqstat, omqstat;
        struct file *filp;
        struct inode *inode;
        struct mqueue_inode_info *info;

        if (u_mqstat != NULL) {
                if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
                        return -EFAULT;
                if (mqstat.mq_flags & (~O_NONBLOCK))
                        return -EINVAL;
        }

        filp = fget(mqdes);
        if (!filp) {
                ret = -EBADF;
                goto out;
        }

        inode = filp->f_path.dentry->d_inode;
        if (unlikely(filp->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
        }
        info = MQUEUE_I(inode);

        spin_lock(&info->lock);

        omqstat = info->attr;
        omqstat.mq_flags = filp->f_flags & O_NONBLOCK;
        if (u_mqstat) {
                audit_mq_getsetattr(mqdes, &mqstat);
                spin_lock(&filp->f_lock);
                if (mqstat.mq_flags & O_NONBLOCK)
                        filp->f_flags |= O_NONBLOCK;
                else
                        filp->f_flags &= ~O_NONBLOCK;
                spin_unlock(&filp->f_lock);

                inode->i_atime = inode->i_ctime = CURRENT_TIME;
        }

        spin_unlock(&info->lock);

        ret = 0;
        if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
                                                sizeof(struct mq_attr)))
                ret = -EFAULT;

out_fput:
        fput(filp);
out:
        return ret;
}
static const struct inode_operations mqueue_dir_inode_operations = {
        .lookup = simple_lookup,
        .create = mqueue_create,
        .unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
        .flush = mqueue_flush_file,
        .poll = mqueue_poll_file,
        .read = mqueue_read_file,
        .llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
        .alloc_inode = mqueue_alloc_inode,
        .destroy_inode = mqueue_destroy_inode,
        .evict_inode = mqueue_evict_inode,
        .statfs = simple_statfs,
};

static struct file_system_type mqueue_fs_type = {
        .name = "mqueue",
        .mount = mqueue_mount,
        .kill_sb = kill_litter_super,
};

int mq_init_ns(struct ipc_namespace *ns)
{
        ns->mq_queues_count = 0;
        ns->mq_queues_max = DFLT_QUEUESMAX;
        ns->mq_msg_max = DFLT_MSGMAX;
        ns->mq_msgsize_max = DFLT_MSGSIZEMAX;
        ns->mq_msg_default = DFLT_MSG;
        ns->mq_msgsize_default = DFLT_MSGSIZE;

        ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
        if (IS_ERR(ns->mq_mnt)) {
                int err = PTR_ERR(ns->mq_mnt);
                ns->mq_mnt = NULL;
                return err;
        }
        return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
        ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
        kern_unmount(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
        int error;

        mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
                                sizeof(struct mqueue_inode_info), 0,
                                SLAB_HWCACHE_ALIGN, init_once);
        if (mqueue_inode_cachep == NULL)
                return -ENOMEM;

        /* ignore failures - they are not fatal */
        mq_sysctl_table = mq_register_sysctl_table();

        error = register_filesystem(&mqueue_fs_type);
        if (error)
                goto out_sysctl;

        spin_lock_init(&mq_lock);

        error = mq_init_ns(&init_ipc_ns);
        if (error)
                goto out_filesystem;

        return 0;

out_filesystem:
        unregister_filesystem(&mqueue_fs_type);
out_sysctl:
        if (mq_sysctl_table)
                unregister_sysctl_table(mq_sysctl_table);
        kmem_cache_destroy(mqueue_inode_cachep);
        return error;
}

__initcall(init_mqueue_fs);