xenbus.c 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598
  1. /*
  2. * Driver giving user-space access to the kernel's xenbus connection
  3. * to xenstore.
  4. *
  5. * Copyright (c) 2005, Christian Limpach
  6. * Copyright (c) 2005, Rusty Russell, IBM Corporation
  7. *
  8. * This program is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU General Public License version 2
  10. * as published by the Free Software Foundation; or, when distributed
  11. * separately from the Linux kernel or incorporated into other
  12. * software packages, subject to the following license:
  13. *
  14. * Permission is hereby granted, free of charge, to any person obtaining a copy
  15. * of this source file (the "Software"), to deal in the Software without
  16. * restriction, including without limitation the rights to use, copy, modify,
  17. * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  18. * and to permit persons to whom the Software is furnished to do so, subject to
  19. * the following conditions:
  20. *
  21. * The above copyright notice and this permission notice shall be included in
  22. * all copies or substantial portions of the Software.
  23. *
  24. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  25. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  26. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  27. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  28. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  29. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  30. * IN THE SOFTWARE.
  31. *
  32. * Changes:
  33. * 2008-10-07 Alex Zeffertt Replaced /proc/xen/xenbus with xenfs filesystem
  34. * and /proc/xen compatibility mount point.
  35. * Turned xenfs into a loadable module.
  36. */
  37. #include <linux/kernel.h>
  38. #include <linux/errno.h>
  39. #include <linux/uio.h>
  40. #include <linux/notifier.h>
  41. #include <linux/wait.h>
  42. #include <linux/fs.h>
  43. #include <linux/poll.h>
  44. #include <linux/mutex.h>
  45. #include <linux/sched.h>
  46. #include <linux/spinlock.h>
  47. #include <linux/mount.h>
  48. #include <linux/pagemap.h>
  49. #include <linux/uaccess.h>
  50. #include <linux/init.h>
  51. #include <linux/namei.h>
  52. #include <linux/string.h>
  53. #include <linux/slab.h>
  54. #include "xenfs.h"
  55. #include "../xenbus/xenbus_comms.h"
  56. #include <xen/xenbus.h>
  57. #include <asm/xen/hypervisor.h>
/*
 * An element of a list of outstanding transactions, for which we're
 * still waiting a reply.
 */
struct xenbus_transaction_holder {
	struct list_head list;		/* entry in xenbus_file_priv.transactions */
	struct xenbus_transaction handle;	/* id handed back by xenstored */
};
/*
 * A buffer of data on the queue.
 */
struct read_buffer {
	struct list_head list;	/* entry in read_buffers or a staging list */
	unsigned int cons;	/* bytes of msg[] already consumed by the reader */
	unsigned int len;	/* total bytes stored in msg[] */
	char msg[];		/* payload (flexible array member, kmalloc'd inline) */
};
/* Per-open-file state for the xenbus device node. */
struct xenbus_file_priv {
	/*
	 * msgbuffer_mutex is held while partial requests are built up
	 * and complete requests are acted on.  It therefore protects
	 * the "transactions" and "watches" lists, and the partial
	 * request length and buffer.
	 *
	 * reply_mutex protects the reply being built up to return to
	 * usermode.  It nests inside msgbuffer_mutex but may be held
	 * alone during a watch callback.
	 */
	struct mutex msgbuffer_mutex;

	/* In-progress transactions */
	struct list_head transactions;

	/* Active watches. */
	struct list_head watches;

	/* Partial request. */
	unsigned int len;		/* bytes of u.buffer currently filled */
	union {
		struct xsd_sockmsg msg;	/* header view of the buffered request */
		char buffer[PAGE_SIZE];	/* raw view; caps the maximum request size */
	} u;

	/* Response queue. */
	struct mutex reply_mutex;
	struct list_head read_buffers;	/* queued struct read_buffer replies/events */
	wait_queue_head_t read_waitq;	/* readers block here until data is queued */
};
  102. /* Read out any raw xenbus messages queued up. */
  103. static ssize_t xenbus_file_read(struct file *filp,
  104. char __user *ubuf,
  105. size_t len, loff_t *ppos)
  106. {
  107. struct xenbus_file_priv *u = filp->private_data;
  108. struct read_buffer *rb;
  109. unsigned i;
  110. int ret;
  111. mutex_lock(&u->reply_mutex);
  112. while (list_empty(&u->read_buffers)) {
  113. mutex_unlock(&u->reply_mutex);
  114. if (filp->f_flags & O_NONBLOCK)
  115. return -EAGAIN;
  116. ret = wait_event_interruptible(u->read_waitq,
  117. !list_empty(&u->read_buffers));
  118. if (ret)
  119. return ret;
  120. mutex_lock(&u->reply_mutex);
  121. }
  122. rb = list_entry(u->read_buffers.next, struct read_buffer, list);
  123. i = 0;
  124. while (i < len) {
  125. unsigned sz = min((unsigned)len - i, rb->len - rb->cons);
  126. ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz);
  127. i += sz - ret;
  128. rb->cons += sz - ret;
  129. if (ret != sz) {
  130. if (i == 0)
  131. i = -EFAULT;
  132. goto out;
  133. }
  134. /* Clear out buffer if it has been consumed */
  135. if (rb->cons == rb->len) {
  136. list_del(&rb->list);
  137. kfree(rb);
  138. if (list_empty(&u->read_buffers))
  139. break;
  140. rb = list_entry(u->read_buffers.next,
  141. struct read_buffer, list);
  142. }
  143. }
  144. out:
  145. mutex_unlock(&u->reply_mutex);
  146. return i;
  147. }
  148. /*
  149. * Add a buffer to the queue. Caller must hold the appropriate lock
  150. * if the queue is not local. (Commonly the caller will build up
  151. * multiple queued buffers on a temporary local list, and then add it
  152. * to the appropriate list under lock once all the buffers have een
  153. * successfully allocated.)
  154. */
  155. static int queue_reply(struct list_head *queue, const void *data, size_t len)
  156. {
  157. struct read_buffer *rb;
  158. if (len == 0)
  159. return 0;
  160. rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
  161. if (rb == NULL)
  162. return -ENOMEM;
  163. rb->cons = 0;
  164. rb->len = len;
  165. memcpy(rb->msg, data, len);
  166. list_add_tail(&rb->list, queue);
  167. return 0;
  168. }
  169. /*
  170. * Free all the read_buffer s on a list.
  171. * Caller must have sole reference to list.
  172. */
  173. static void queue_cleanup(struct list_head *list)
  174. {
  175. struct read_buffer *rb;
  176. while (!list_empty(list)) {
  177. rb = list_entry(list->next, struct read_buffer, list);
  178. list_del(list->next);
  179. kfree(rb);
  180. }
  181. }
/* Glue binding one registered xenbus watch to the file that created it. */
struct watch_adapter {
	struct list_head list;		/* entry in xenbus_file_priv.watches */
	struct xenbus_watch watch;	/* the watch registered with xenbus */
	struct xenbus_file_priv *dev_data;	/* owning file's private state */
	char *token;	/* userspace-chosen token, echoed back in events */
};
  188. static void free_watch_adapter(struct watch_adapter *watch)
  189. {
  190. kfree(watch->watch.node);
  191. kfree(watch->token);
  192. kfree(watch);
  193. }
  194. static struct watch_adapter *alloc_watch_adapter(const char *path,
  195. const char *token)
  196. {
  197. struct watch_adapter *watch;
  198. watch = kzalloc(sizeof(*watch), GFP_KERNEL);
  199. if (watch == NULL)
  200. goto out_fail;
  201. watch->watch.node = kstrdup(path, GFP_KERNEL);
  202. if (watch->watch.node == NULL)
  203. goto out_free;
  204. watch->token = kstrdup(token, GFP_KERNEL);
  205. if (watch->token == NULL)
  206. goto out_free;
  207. return watch;
  208. out_free:
  209. free_watch_adapter(watch);
  210. out_fail:
  211. return NULL;
  212. }
/*
 * Xenbus watch callback: marshal the fired watch into an XS_WATCH_EVENT
 * message (header, path, token, optional extra data) and queue it on the
 * owning file's read_buffers list so userspace sees it on the next read().
 *
 * Runs holding only reply_mutex (see xenbus_file_priv lock comment).
 */
static void watch_fired(struct xenbus_watch *watch,
			const char **vec,
			unsigned int len)
{
	struct watch_adapter *adap;
	struct xsd_sockmsg hdr;
	const char *path, *token;
	int path_len, tok_len, body_len, data_len = 0;
	int ret;
	LIST_HEAD(staging_q);

	adap = container_of(watch, struct watch_adapter, watch);

	path = vec[XS_WATCH_PATH];
	token = adap->token;	/* echo the token userspace registered */

	/* Body strings are sent NUL-terminated, hence the +1s. */
	path_len = strlen(path) + 1;
	tok_len = strlen(token) + 1;
	if (len > 2)
		/*
		 * Extra payload beyond path/token.  NOTE(review): this
		 * pointer arithmetic assumes the strings of vec[] are
		 * packed contiguously so vec[len] - vec[2] spans them
		 * all - confirm against the xenstore vector layout.
		 */
		data_len = vec[len] - vec[2] + 1;
	body_len = path_len + tok_len + data_len;

	hdr.type = XS_WATCH_EVENT;
	hdr.len = body_len;

	mutex_lock(&adap->dev_data->reply_mutex);

	/*
	 * Build the event on a private staging list first: if any
	 * allocation fails, nothing half-built is ever visible to the
	 * reader.
	 */
	ret = queue_reply(&staging_q, &hdr, sizeof(hdr));
	if (!ret)
		ret = queue_reply(&staging_q, path, path_len);
	if (!ret)
		ret = queue_reply(&staging_q, token, tok_len);
	if (!ret && len > 2)
		ret = queue_reply(&staging_q, vec[2], data_len);

	if (!ret) {
		/* success: pass reply list onto watcher */
		list_splice_tail(&staging_q, &adap->dev_data->read_buffers);
		wake_up(&adap->dev_data->read_waitq);
	} else
		queue_cleanup(&staging_q);	/* OOM: drop the whole event */

	mutex_unlock(&adap->dev_data->reply_mutex);
}
/*
 * Forward the complete buffered request (u->u.msg) to xenstored and
 * queue its reply for this file's reader.  For XS_TRANSACTION_START /
 * XS_TRANSACTION_END the per-file list of open transactions is also
 * updated so anything left open can be aborted at release time.
 *
 * Called with u->msgbuffer_mutex held.  Returns 0 on success or a
 * negative errno.
 */
static int xenbus_write_transaction(unsigned msg_type,
				    struct xenbus_file_priv *u)
{
	int rc;
	void *reply;
	struct xenbus_transaction_holder *trans = NULL;
	LIST_HEAD(staging_q);

	if (msg_type == XS_TRANSACTION_START) {
		/* Allocate the holder up front so an OOM is reported
		   before the request ever reaches xenstored. */
		trans = kmalloc(sizeof(*trans), GFP_KERNEL);
		if (!trans) {
			rc = -ENOMEM;
			goto out;
		}
	}

	reply = xenbus_dev_request_and_reply(&u->u.msg);
	if (IS_ERR(reply)) {
		kfree(trans);	/* harmless when trans is NULL */
		rc = PTR_ERR(reply);
		goto out;
	}

	if (msg_type == XS_TRANSACTION_START) {
		/* Reply body carries the new transaction id as a number. */
		trans->handle.id = simple_strtoul(reply, NULL, 0);

		list_add(&trans->list, &u->transactions);
	} else if (msg_type == XS_TRANSACTION_END) {
		list_for_each_entry(trans, &u->transactions, list)
			if (trans->handle.id == u->u.msg.tx_id)
				break;
		/* Ending a transaction this file never started is fatal. */
		BUG_ON(&trans->list == &u->transactions);
		list_del(&trans->list);
		kfree(trans);
	}

	/* Stage header + body privately, then publish atomically. */
	mutex_lock(&u->reply_mutex);
	rc = queue_reply(&staging_q, &u->u.msg, sizeof(u->u.msg));
	if (!rc)
		rc = queue_reply(&staging_q, reply, u->u.msg.len);
	if (!rc) {
		list_splice_tail(&staging_q, &u->read_buffers);
		wake_up(&u->read_waitq);
	} else {
		queue_cleanup(&staging_q);
	}
	mutex_unlock(&u->reply_mutex);

	kfree(reply);

out:
	return rc;
}
  295. static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
  296. {
  297. struct watch_adapter *watch, *tmp_watch;
  298. char *path, *token;
  299. int err, rc;
  300. LIST_HEAD(staging_q);
  301. path = u->u.buffer + sizeof(u->u.msg);
  302. token = memchr(path, 0, u->u.msg.len);
  303. if (token == NULL) {
  304. rc = -EILSEQ;
  305. goto out;
  306. }
  307. token++;
  308. if (msg_type == XS_WATCH) {
  309. watch = alloc_watch_adapter(path, token);
  310. if (watch == NULL) {
  311. rc = -ENOMEM;
  312. goto out;
  313. }
  314. watch->watch.callback = watch_fired;
  315. watch->dev_data = u;
  316. err = register_xenbus_watch(&watch->watch);
  317. if (err) {
  318. free_watch_adapter(watch);
  319. rc = err;
  320. goto out;
  321. }
  322. list_add(&watch->list, &u->watches);
  323. } else {
  324. list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
  325. if (!strcmp(watch->token, token) &&
  326. !strcmp(watch->watch.node, path)) {
  327. unregister_xenbus_watch(&watch->watch);
  328. list_del(&watch->list);
  329. free_watch_adapter(watch);
  330. break;
  331. }
  332. }
  333. }
  334. /* Success. Synthesize a reply to say all is OK. */
  335. {
  336. struct {
  337. struct xsd_sockmsg hdr;
  338. char body[3];
  339. } __packed reply = {
  340. {
  341. .type = msg_type,
  342. .len = sizeof(reply.body)
  343. },
  344. "OK"
  345. };
  346. mutex_lock(&u->reply_mutex);
  347. rc = queue_reply(&u->read_buffers, &reply, sizeof(reply));
  348. mutex_unlock(&u->reply_mutex);
  349. }
  350. out:
  351. return rc;
  352. }
/*
 * Accumulate userspace writes until a complete xenbus message (header
 * plus body) is buffered in u->u.buffer, then dispatch it by type.
 *
 * Writes may arrive in arbitrary fragments; a partial message is kept
 * across calls in u->u.buffer / u->len.  Returns the number of bytes
 * consumed from this write, or a negative errno (in which case any
 * partial message is discarded).
 */
static ssize_t xenbus_file_write(struct file *filp,
				 const char __user *ubuf,
				 size_t len, loff_t *ppos)
{
	struct xenbus_file_priv *u = filp->private_data;
	uint32_t msg_type;
	int rc = len;
	int ret;
	LIST_HEAD(staging_q);	/* NOTE(review): unused here - leftover? */

	/*
	 * We're expecting usermode to be writing properly formed
	 * xenbus messages.  If they write an incomplete message we
	 * buffer it up.  Once it is complete, we act on it.
	 */

	/*
	 * Make sure concurrent writers can't stomp all over each
	 * other's messages and make a mess of our partial message
	 * buffer.  We don't make any attempt to stop multiple
	 * writers from making a mess of each other's incomplete
	 * messages; we're just trying to guarantee our own internal
	 * consistency and make sure that single writes are handled
	 * atomically.
	 */
	mutex_lock(&u->msgbuffer_mutex);

	/* Get this out of the way early to avoid confusion */
	if (len == 0)
		goto out;

	/* Can't write a xenbus message larger than we can buffer */
	if ((len + u->len) > sizeof(u->u.buffer)) {
		/* On error, dump existing buffer */
		u->len = 0;
		rc = -EINVAL;
		goto out;
	}

	ret = copy_from_user(u->u.buffer + u->len, ubuf, len);

	/* ret == len means copy_from_user copied nothing at all. */
	if (ret == len) {
		rc = -EFAULT;
		goto out;
	}

	/* Deal with a partial copy. */
	len -= ret;
	rc = len;

	u->len += len;

	/* Return if we haven't got a full message yet */
	if (u->len < sizeof(u->u.msg))
		goto out;	/* not even the header yet */

	/* If we're expecting a message that's larger than we can
	   possibly send, dump what we have and return an error. */
	if ((sizeof(u->u.msg) + u->u.msg.len) > sizeof(u->u.buffer)) {
		rc = -E2BIG;
		u->len = 0;
		goto out;
	}

	if (u->len < (sizeof(u->u.msg) + u->u.msg.len))
		goto out;	/* incomplete data portion */

	/*
	 * OK, now we have a complete message.  Do something with it.
	 */

	msg_type = u->u.msg.type;

	switch (msg_type) {
	case XS_TRANSACTION_START:
	case XS_TRANSACTION_END:
	case XS_DIRECTORY:
	case XS_READ:
	case XS_GET_PERMS:
	case XS_RELEASE:
	case XS_GET_DOMAIN_PATH:
	case XS_WRITE:
	case XS_MKDIR:
	case XS_RM:
	case XS_SET_PERMS:
		/* Send out a transaction */
		ret = xenbus_write_transaction(msg_type, u);
		break;

	case XS_WATCH:
	case XS_UNWATCH:
		/* (Un)Ask for some path to be watched for changes */
		ret = xenbus_write_watch(msg_type, u);
		break;

	default:
		ret = -EINVAL;
		break;
	}
	if (ret != 0)
		rc = ret;

	/* Buffered message consumed */
	u->len = 0;

out:
	mutex_unlock(&u->msgbuffer_mutex);
	return rc;
}
  444. static int xenbus_file_open(struct inode *inode, struct file *filp)
  445. {
  446. struct xenbus_file_priv *u;
  447. if (xen_store_evtchn == 0)
  448. return -ENOENT;
  449. nonseekable_open(inode, filp);
  450. u = kzalloc(sizeof(*u), GFP_KERNEL);
  451. if (u == NULL)
  452. return -ENOMEM;
  453. INIT_LIST_HEAD(&u->transactions);
  454. INIT_LIST_HEAD(&u->watches);
  455. INIT_LIST_HEAD(&u->read_buffers);
  456. init_waitqueue_head(&u->read_waitq);
  457. mutex_init(&u->reply_mutex);
  458. mutex_init(&u->msgbuffer_mutex);
  459. filp->private_data = u;
  460. return 0;
  461. }
  462. static int xenbus_file_release(struct inode *inode, struct file *filp)
  463. {
  464. struct xenbus_file_priv *u = filp->private_data;
  465. struct xenbus_transaction_holder *trans, *tmp;
  466. struct watch_adapter *watch, *tmp_watch;
  467. /*
  468. * No need for locking here because there are no other users,
  469. * by definition.
  470. */
  471. list_for_each_entry_safe(trans, tmp, &u->transactions, list) {
  472. xenbus_transaction_end(trans->handle, 1);
  473. list_del(&trans->list);
  474. kfree(trans);
  475. }
  476. list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
  477. unregister_xenbus_watch(&watch->watch);
  478. list_del(&watch->list);
  479. free_watch_adapter(watch);
  480. }
  481. kfree(u);
  482. return 0;
  483. }
  484. static unsigned int xenbus_file_poll(struct file *file, poll_table *wait)
  485. {
  486. struct xenbus_file_priv *u = file->private_data;
  487. poll_wait(file, &u->read_waitq, wait);
  488. if (!list_empty(&u->read_buffers))
  489. return POLLIN | POLLRDNORM;
  490. return 0;
  491. }
/* File operations backing the xenbus node exposed through xenfs. */
const struct file_operations xenbus_file_ops = {
	.read = xenbus_file_read,
	.write = xenbus_file_write,
	.open = xenbus_file_open,
	.release = xenbus_file_release,
	.poll = xenbus_file_poll,
	.llseek = no_llseek,	/* stream device: seeking is meaningless */
};