blkback.c

/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/bitmap.h>

#include <xen/events.h>
#include <xen/page.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#include "common.h"

/*
 * Maximum number of unused free pages to keep in the internal buffer.
 * Setting this to a value too low will reduce memory used in each backend,
 * but can have a performance penalty.
 *
 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but it can
 * be set to a lower value that might degrade performance on some intensive
 * IO workloads.
 */

static int xen_blkif_max_buffer_pages = 704;
module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
                 "Maximum number of free pages to keep in each block backend buffer");

/*
 * Maximum number of grants to map persistently in blkback. For maximum
 * performance this should be the total number of grants that can be used
 * to fill the ring, but since this might become too high, especially with
 * the use of indirect descriptors, we set it to a value that provides good
 * performance without using too much memory.
 *
 * When the list of persistent grants is full we clean it up using an LRU
 * algorithm.
 */

static int xen_blkif_max_pgrants = 352;
module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
MODULE_PARM_DESC(max_persistent_grants,
                 "Maximum number of grants to map persistently");

/*
 * The LRU mechanism to clean the lists of persistent grants needs to
 * be executed periodically. The time interval between consecutive executions
 * of the purge mechanism is set in ms.
 */
#define LRU_INTERVAL 100

/*
 * When the persistent grants list is full we will remove unused grants
 * from the list. The percentage of grants to be removed at each LRU
 * execution.
 */
#define LRU_PERCENT_CLEAN 5
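
/*
 * Illustrative arithmetic with the defaults above: LRU_PERCENT_CLEAN of the
 * xen_blkif_max_pgrants limit is (352 / 100) * 5 = 15, so each LRU pass
 * frees at least 15 unused grants, plus however many grants the backend is
 * currently over the limit (see purge_persistent_gnt() below).
 */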

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, int, 0644);

#define BLKBACK_INVALID_HANDLE (~0)

/* Number of free pages to remove on each call to free_xenballooned_pages */
#define NUM_BATCH_FREE_PAGES 10
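
/*
 * Buffer-pool helpers. blkif->free_pages is a pool of ballooned pages shared
 * by all requests of this backend: get_free_page() takes a page from the
 * pool (falling back to alloc_xenballooned_pages() when the pool is empty)
 * and put_free_pages() returns pages to it. The pool is trimmed back to
 * xen_blkif_max_buffer_pages by shrink_free_pagepool() from the
 * xen_blkif_schedule() loop.
 */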

static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
{
        unsigned long flags;

        spin_lock_irqsave(&blkif->free_pages_lock, flags);
        if (list_empty(&blkif->free_pages)) {
                BUG_ON(blkif->free_pages_num != 0);
                spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
                return alloc_xenballooned_pages(1, page, false);
        }
        BUG_ON(blkif->free_pages_num == 0);
        page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
        list_del(&page[0]->lru);
        blkif->free_pages_num--;
        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);

        return 0;
}

static inline void put_free_pages(struct xen_blkif *blkif, struct page **page,
                                  int num)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&blkif->free_pages_lock, flags);
        for (i = 0; i < num; i++)
                list_add(&page[i]->lru, &blkif->free_pages);
        blkif->free_pages_num += num;
        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
}

static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
{
        /* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
        struct page *page[NUM_BATCH_FREE_PAGES];
        unsigned int num_pages = 0;
        unsigned long flags;

        spin_lock_irqsave(&blkif->free_pages_lock, flags);
        while (blkif->free_pages_num > num) {
                BUG_ON(list_empty(&blkif->free_pages));
                page[num_pages] = list_first_entry(&blkif->free_pages,
                                                   struct page, lru);
                list_del(&page[num_pages]->lru);
                blkif->free_pages_num--;
                if (++num_pages == NUM_BATCH_FREE_PAGES) {
                        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
                        free_xenballooned_pages(num_pages, page);
                        spin_lock_irqsave(&blkif->free_pages_lock, flags);
                        num_pages = 0;
                }
        }
        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
        if (num_pages != 0)
                free_xenballooned_pages(num_pages, page);
}

#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))

#define pending_handle(_req, _seg) \
        (_req->grant_handles[_seg])

static int do_block_io_op(struct xen_blkif *blkif);
static int dispatch_rw_block_io(struct xen_blkif *blkif,
                                struct blkif_request *req,
                                struct pending_req *pending_req);
static void make_response(struct xen_blkif *blkif, u64 id,
                          unsigned short op, int st);
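
/*
 * Iterate over the red-black tree of persistent grants, precomputing the
 * next node so that the loop body may erase 'pos' from the tree as it goes.
 * Illustrative sketch of the pattern (this is what free_persistent_gnts()
 * below does):
 *
 *      foreach_grant_safe(persistent_gnt, n, root, node) {
 *              rb_erase(&persistent_gnt->node, root);
 *              kfree(persistent_gnt);
 *      }
 */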
#define foreach_grant_safe(pos, n, rbtree, node) \
        for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
             (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
             &(pos)->node != NULL; \
             (pos) = container_of(n, typeof(*(pos)), node), \
             (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)

/*
 * We don't need locking around the persistent grant helpers
 * because blkback uses a single thread for each backend, so we
 * can be sure that these functions will never be called recursively.
 *
 * The only exception to that is put_persistent_gnt, which can be called
 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
 * bit operations to modify the flags of a persistent grant and to count
 * the number of used grants.
 */
static int add_persistent_gnt(struct xen_blkif *blkif,
                              struct persistent_gnt *persistent_gnt)
{
        struct rb_node **new = NULL, *parent = NULL;
        struct persistent_gnt *this;

        if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) {
                if (!blkif->vbd.overflow_max_grants)
                        blkif->vbd.overflow_max_grants = 1;
                return -EBUSY;
        }
        /* Figure out where to put the new node. */
        new = &blkif->persistent_gnts.rb_node;
        while (*new) {
                this = container_of(*new, struct persistent_gnt, node);

                parent = *new;
                if (persistent_gnt->gnt < this->gnt)
                        new = &((*new)->rb_left);
                else if (persistent_gnt->gnt > this->gnt)
                        new = &((*new)->rb_right);
                else {
                        pr_alert_ratelimited(DRV_PFX " trying to add a gref that's already in the tree\n");
                        return -EINVAL;
                }
        }

        bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
        set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
        /* Add new node and rebalance tree. */
        rb_link_node(&(persistent_gnt->node), parent, new);
        rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts);
        blkif->persistent_gnt_c++;
        atomic_inc(&blkif->persistent_gnt_in_use);
        return 0;
}
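
/*
 * Look up a grant reference in the tree and mark it in use. Returns NULL
 * both on a miss and when the grant is already PERSISTENT_GNT_ACTIVE: a
 * well-behaved frontend never has the same gref in two in-flight segments,
 * so the same mapped page is never handed out twice.
 */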
static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif,
                                                 grant_ref_t gref)
{
        struct persistent_gnt *data;
        struct rb_node *node = NULL;

        node = blkif->persistent_gnts.rb_node;
        while (node) {
                data = container_of(node, struct persistent_gnt, node);

                if (gref < data->gnt)
                        node = node->rb_left;
                else if (gref > data->gnt)
                        node = node->rb_right;
                else {
                        if (test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
                                pr_alert_ratelimited(DRV_PFX " requesting a grant already in use\n");
                                return NULL;
                        }
                        set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
                        atomic_inc(&blkif->persistent_gnt_in_use);
                        return data;
                }
        }
        return NULL;
}

static void put_persistent_gnt(struct xen_blkif *blkif,
                               struct persistent_gnt *persistent_gnt)
{
        if (!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
                pr_alert_ratelimited(DRV_PFX " freeing a grant already unused\n");
        set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
        clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
        atomic_dec(&blkif->persistent_gnt_in_use);
}

static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
                                 unsigned int num)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt;
        struct rb_node *n;
        int ret = 0;
        int segs_to_unmap = 0;

        foreach_grant_safe(persistent_gnt, n, root, node) {
                BUG_ON(persistent_gnt->handle ==
                       BLKBACK_INVALID_HANDLE);
                gnttab_set_unmap_op(&unmap[segs_to_unmap],
                                    (unsigned long) pfn_to_kaddr(page_to_pfn(
                                            persistent_gnt->page)),
                                    GNTMAP_host_map,
                                    persistent_gnt->handle);

                pages[segs_to_unmap] = persistent_gnt->page;

                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
                    !rb_next(&persistent_gnt->node)) {
                        ret = gnttab_unmap_refs(unmap, NULL, pages,
                                                segs_to_unmap);
                        BUG_ON(ret);
                        put_free_pages(blkif, pages, segs_to_unmap);
                        segs_to_unmap = 0;
                }

                rb_erase(&persistent_gnt->node, root);
                kfree(persistent_gnt);
                num--;
        }
        BUG_ON(num != 0);
}

static void unmap_purged_grants(struct work_struct *work)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt;
        int ret, segs_to_unmap = 0;
        struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);

        while (!list_empty(&blkif->persistent_purge_list)) {
                persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
                                                  struct persistent_gnt,
                                                  remove_node);
                list_del(&persistent_gnt->remove_node);

                gnttab_set_unmap_op(&unmap[segs_to_unmap],
                                    vaddr(persistent_gnt->page),
                                    GNTMAP_host_map,
                                    persistent_gnt->handle);

                pages[segs_to_unmap] = persistent_gnt->page;

                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
                        ret = gnttab_unmap_refs(unmap, NULL, pages,
                                                segs_to_unmap);
                        BUG_ON(ret);
                        put_free_pages(blkif, pages, segs_to_unmap);
                        segs_to_unmap = 0;
                }
                kfree(persistent_gnt);
        }
        if (segs_to_unmap > 0) {
                ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
                BUG_ON(ret);
                put_free_pages(blkif, pages, segs_to_unmap);
        }
}
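
/*
 * LRU purge: runs from xen_blkif_schedule() every LRU_INTERVAL ms once the
 * tree is full. It detaches up to 'num_clean' grants that are not currently
 * in use onto persistent_purge_list (first skipping grants used since the
 * last purge and, only if that is not enough, taking those as well), and
 * defers the actual gnttab unmapping to the unmap_purged_grants() work item
 * above.
 */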
static void purge_persistent_gnt(struct xen_blkif *blkif)
{
        struct persistent_gnt *persistent_gnt;
        struct rb_node *n;
        unsigned int num_clean, total;
        bool scan_used = false;
        struct rb_root *root;

        if (blkif->persistent_gnt_c < xen_blkif_max_pgrants ||
            (blkif->persistent_gnt_c == xen_blkif_max_pgrants &&
             !blkif->vbd.overflow_max_grants)) {
                return;
        }

        if (work_pending(&blkif->persistent_purge_work)) {
                pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n");
                return;
        }

        num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
        num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
        num_clean = min(blkif->persistent_gnt_c, num_clean);
        if (num_clean >
            (blkif->persistent_gnt_c -
             atomic_read(&blkif->persistent_gnt_in_use)))
                return;

        /*
         * At this point, we can be sure that there will be no calls
         * to get_persistent_gnt (because we are executing this code from
         * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
         * which means that the number of currently used grants will go down,
         * but never up, so we will always be able to remove the requested
         * number of grants.
         */

        total = num_clean;

        pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);

        INIT_LIST_HEAD(&blkif->persistent_purge_list);
        root = &blkif->persistent_gnts;
purge_list:
        foreach_grant_safe(persistent_gnt, n, root, node) {
                BUG_ON(persistent_gnt->handle ==
                       BLKBACK_INVALID_HANDLE);

                if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
                        continue;
                if (!scan_used &&
                    (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
                        continue;

                rb_erase(&persistent_gnt->node, root);
                list_add(&persistent_gnt->remove_node,
                         &blkif->persistent_purge_list);
                if (--num_clean == 0)
                        goto finished;
        }
        /*
         * If we get here it means we also need to start cleaning
         * grants that were used since the last purge in order to cope
         * with the requested number.
         */
        if (!scan_used) {
                pr_debug(DRV_PFX "Still missing %u purged frames\n", num_clean);
                scan_used = true;
                goto purge_list;
        }
finished:
        /* Remove the "used" flag from all the persistent grants */
        foreach_grant_safe(persistent_gnt, n, root, node) {
                BUG_ON(persistent_gnt->handle ==
                       BLKBACK_INVALID_HANDLE);
                clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
        }
        blkif->persistent_gnt_c -= (total - num_clean);
        blkif->vbd.overflow_max_grants = 0;

        /* We can defer this work */
        INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants);
        schedule_work(&blkif->persistent_purge_work);
        pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
        return;
}

/*
 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
 */
static struct pending_req *alloc_req(struct xen_blkif *blkif)
{
        struct pending_req *req = NULL;
        unsigned long flags;

        spin_lock_irqsave(&blkif->pending_free_lock, flags);
        if (!list_empty(&blkif->pending_free)) {
                req = list_entry(blkif->pending_free.next, struct pending_req,
                                 free_list);
                list_del(&req->free_list);
        }
        spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
        return req;
}

/*
 * Return the 'pending_req' structure back to the freepool. We also
 * wake up the thread if it was waiting for a free request.
 */
static void free_req(struct xen_blkif *blkif, struct pending_req *req)
{
        unsigned long flags;
        int was_empty;

        spin_lock_irqsave(&blkif->pending_free_lock, flags);
        was_empty = list_empty(&blkif->pending_free);
        list_add(&req->free_list, &blkif->pending_free);
        spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
        if (was_empty)
                wake_up(&blkif->pending_free_wq);
}

/*
 * Routines for managing virtual block devices (vbds).
 */
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
                             int operation)
{
        struct xen_vbd *vbd = &blkif->vbd;
        int rc = -EACCES;

        if ((operation != READ) && vbd->readonly)
                goto out;

        if (likely(req->nr_sects)) {
                blkif_sector_t end = req->sector_number + req->nr_sects;

                if (unlikely(end < req->sector_number))
                        goto out;
                if (unlikely(end > vbd_sz(vbd)))
                        goto out;
        }

        req->dev  = vbd->pdevice;
        req->bdev = vbd->bdev;
        rc = 0;

 out:
        return rc;
}

static void xen_vbd_resize(struct xen_blkif *blkif)
{
        struct xen_vbd *vbd = &blkif->vbd;
        struct xenbus_transaction xbt;
        int err;
        struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
        unsigned long long new_size = vbd_sz(vbd);

        pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n",
                blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
        pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size);
        vbd->size = new_size;
again:
        err = xenbus_transaction_start(&xbt);
        if (err) {
                pr_warn(DRV_PFX "Error starting transaction\n");
                return;
        }
        err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
                            (unsigned long long)vbd_sz(vbd));
        if (err) {
                pr_warn(DRV_PFX "Error writing new size\n");
                goto abort;
        }
        /*
         * Write the current state; we will use this to synchronize
         * the front-end. If the current state is "connected" the
         * front-end will get the new size information online.
         */
        err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
        if (err) {
                pr_warn(DRV_PFX "Error writing the state\n");
                goto abort;
        }

        err = xenbus_transaction_end(xbt, 0);
        if (err == -EAGAIN)
                goto again;
        if (err)
                pr_warn(DRV_PFX "Error ending transaction\n");
        return;
abort:
        xenbus_transaction_end(xbt, 1);
}

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct xen_blkif *blkif)
{
        blkif->waiting_reqs = 1;
        wake_up(&blkif->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
        blkif_notify_work(dev_id);
        return IRQ_HANDLED;
}

/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct xen_blkif *blkif)
{
        pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
                " | ds %4llu | pg: %4u/%4d\n",
                current->comm, blkif->st_oo_req,
                blkif->st_rd_req, blkif->st_wr_req,
                blkif->st_f_req, blkif->st_ds_req,
                blkif->persistent_gnt_c,
                xen_blkif_max_pgrants);
        blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
        blkif->st_rd_req = 0;
        blkif->st_wr_req = 0;
        blkif->st_oo_req = 0;
        blkif->st_ds_req = 0;
}
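
/*
 * Main loop of the per-backend kernel thread. Each pass (at most
 * LRU_INTERVAL ms long) waits for the frontend to signal work and for a
 * free pending_req, drains the ring via do_block_io_op(), and then does
 * the periodic housekeeping: purging persistent grants and shrinking the
 * free-page pool back to xen_blkif_max_buffer_pages.
 */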
int xen_blkif_schedule(void *arg)
{
        struct xen_blkif *blkif = arg;
        struct xen_vbd *vbd = &blkif->vbd;
        unsigned long timeout;

        xen_blkif_get(blkif);

        while (!kthread_should_stop()) {
                if (try_to_freeze())
                        continue;
                if (unlikely(vbd->size != vbd_sz(vbd)))
                        xen_vbd_resize(blkif);

                timeout = msecs_to_jiffies(LRU_INTERVAL);

                timeout = wait_event_interruptible_timeout(
                        blkif->wq,
                        blkif->waiting_reqs || kthread_should_stop(),
                        timeout);
                if (timeout == 0)
                        goto purge_gnt_list;
                timeout = wait_event_interruptible_timeout(
                        blkif->pending_free_wq,
                        !list_empty(&blkif->pending_free) ||
                        kthread_should_stop(),
                        timeout);
                if (timeout == 0)
                        goto purge_gnt_list;

                blkif->waiting_reqs = 0;
                smp_mb(); /* clear flag *before* checking for work */

                if (do_block_io_op(blkif))
                        blkif->waiting_reqs = 1;

purge_gnt_list:
                if (blkif->vbd.feature_gnt_persistent &&
                    time_after(jiffies, blkif->next_lru)) {
                        purge_persistent_gnt(blkif);
                        blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
                }

                /* Shrink if we have more than xen_blkif_max_buffer_pages */
                shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages);

                if (log_stats && time_after(jiffies, blkif->st_print))
                        print_stats(blkif);
        }

        /* Since we are shutting down remove all pages from the buffer */
        shrink_free_pagepool(blkif, 0 /* All */);

        /* Free all persistent grant pages */
        if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
                free_persistent_gnts(blkif, &blkif->persistent_gnts,
                                     blkif->persistent_gnt_c);

        BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
        blkif->persistent_gnt_c = 0;

        if (log_stats)
                print_stats(blkif);

        blkif->xenblkd = NULL;
        xen_blkif_put(blkif);

        return 0;
}
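
/*
 * Per-segment bookkeeping for an in-flight request: 'offset' is the byte
 * offset of the segment within its page (first_sect << 9) and 'nsec' is its
 * length in 512-byte sectors. Filled in by xen_blkbk_map() and
 * dispatch_rw_block_io(), and consumed when assembling bios.
 */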
struct seg_buf {
        unsigned int offset;
        unsigned int nsec;
};

/*
 * Unmap the grant references, and also remove the M2P overrides
 * used in the 'pending_req'.
 */
static void xen_blkbk_unmap(struct pending_req *req)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int i, invcount = 0;
        grant_handle_t handle;
        struct xen_blkif *blkif = req->blkif;
        int ret;

        for (i = 0; i < req->nr_pages; i++) {
                if (req->persistent_gnts[i] != NULL) {
                        /* Persistent grants stay mapped; just drop the ref. */
                        put_persistent_gnt(blkif, req->persistent_gnts[i]);
                        continue;
                }
                handle = pending_handle(req, i);
                pages[invcount] = req->pages[i];
                if (handle == BLKBACK_INVALID_HANDLE)
                        continue;
                gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[invcount]),
                                    GNTMAP_host_map, handle);
                pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
                invcount++;
        }

        ret = gnttab_unmap_refs(unmap, NULL, pages, invcount);
        BUG_ON(ret);
        put_free_pages(blkif, pages, invcount);
}
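
/*
 * Map the grant references of one request. For each segment we first try to
 * reuse an already-mapped persistent grant; the remaining segments are
 * collected in map[] and mapped with a single batched gnttab_map_refs()
 * hypercall. Newly mapped grants are then promoted into the persistent tree
 * while there is room (persistent_gnt_c < xen_blkif_max_pgrants); otherwise
 * they stay non-persistent and are unmapped again in xen_blkbk_unmap().
 * Returns 0 on success, -ENOMEM if a buffer page cannot be allocated, and a
 * positive value if any grant failed to map.
 */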
static int xen_blkbk_map(struct blkif_request *req,
                         struct pending_req *pending_req,
                         struct seg_buf seg[],
                         struct page *pages[])
{
        struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt **persistent_gnts = pending_req->persistent_gnts;
        struct persistent_gnt *persistent_gnt = NULL;
        struct xen_blkif *blkif = pending_req->blkif;
        phys_addr_t addr = 0;
        int i, seg_idx, new_map_idx;
        int nseg = req->u.rw.nr_segments;
        int segs_to_map = 0;
        int ret = 0;
        int use_persistent_gnts;

        use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);

        /*
         * Fill out preq.nr_sects with the proper number of sectors, and set
         * up map[..] with the PFN of the page in our domain with the
         * corresponding grant reference for each page.
         */
        for (i = 0; i < nseg; i++) {
                uint32_t flags;

                if (use_persistent_gnts)
                        persistent_gnt = get_persistent_gnt(
                                blkif,
                                req->u.rw.seg[i].gref);
                if (persistent_gnt) {
                        /*
                         * We are using persistent grants and
                         * the grant is already mapped
                         */
                        pages[i] = persistent_gnt->page;
                        persistent_gnts[i] = persistent_gnt;
                } else {
                        if (get_free_page(blkif, &pages[i]))
                                goto out_of_memory;
                        addr = vaddr(pages[i]);
                        pages_to_gnt[segs_to_map] = pages[i];
                        persistent_gnts[i] = NULL;
                        flags = GNTMAP_host_map;
                        if (!use_persistent_gnts &&
                            (pending_req->operation != BLKIF_OP_READ))
                                flags |= GNTMAP_readonly;
                        gnttab_set_map_op(&map[segs_to_map++], addr,
                                          flags, req->u.rw.seg[i].gref,
                                          blkif->domid);
                }
        }

        if (segs_to_map) {
                ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
                BUG_ON(ret);
        }

        /*
         * Now swizzle the MFN in our domain with the MFN from the other domain
         * so that when we access vaddr(pending_req,i) it has the contents of
         * the page from the other domain.
         */
        for (seg_idx = 0, new_map_idx = 0; seg_idx < nseg; seg_idx++) {
                if (!persistent_gnts[seg_idx]) {
                        /* This is a newly mapped grant */
                        BUG_ON(new_map_idx >= segs_to_map);
                        if (unlikely(map[new_map_idx].status != 0)) {
                                pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
                                pending_handle(pending_req, seg_idx) = BLKBACK_INVALID_HANDLE;
                                ret |= 1;
                                new_map_idx++;
                                /*
                                 * No need to set unmap_seg bit, since
                                 * we can not unmap this grant because
                                 * the handle is invalid.
                                 */
                                continue;
                        }
                        pending_handle(pending_req, seg_idx) = map[new_map_idx].handle;
                } else {
                        /* This grant is persistent and already mapped */
                        goto next;
                }
                if (use_persistent_gnts &&
                    blkif->persistent_gnt_c < xen_blkif_max_pgrants) {
                        /*
                         * We are using persistent grants, the grant is
                         * not mapped but we might have room for it.
                         */
                        persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
                                                 GFP_KERNEL);
                        if (!persistent_gnt) {
                                /*
                                 * If we don't have enough memory to
                                 * allocate the persistent_gnt struct
                                 * map this grant non-persistently
                                 */
                                goto next_unmap;
                        }
                        persistent_gnt->gnt = map[new_map_idx].ref;
                        persistent_gnt->handle = map[new_map_idx].handle;
                        persistent_gnt->page = pages[seg_idx];
                        if (add_persistent_gnt(blkif,
                                               persistent_gnt)) {
                                kfree(persistent_gnt);
                                persistent_gnt = NULL;
                                goto next_unmap;
                        }
                        persistent_gnts[seg_idx] = persistent_gnt;
                        pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
                                 persistent_gnt->gnt, blkif->persistent_gnt_c,
                                 xen_blkif_max_pgrants);
                        new_map_idx++;
                        goto next;
                }
                if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
                        blkif->vbd.overflow_max_grants = 1;
                        pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
                                 blkif->domid, blkif->vbd.handle);
                }
next_unmap:
                /*
                 * We could not map this grant persistently, so use it as
                 * a non-persistent grant.
                 */
                new_map_idx++;
next:
                seg[seg_idx].offset = (req->u.rw.seg[seg_idx].first_sect << 9);
        }
        return ret;

out_of_memory:
        pr_alert(DRV_PFX "%s: out of memory\n", __func__);
        put_free_pages(blkif, pages_to_gnt, segs_to_map);
        return -ENOMEM;
}
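
/*
 * Handle a BLKIF_OP_DISCARD request: translate it into a
 * blkdev_issue_discard() on the backing device (secure erase when both
 * sides support it) and respond on the ring directly, since no data
 * segments accompany a discard.
 */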
static int dispatch_discard_io(struct xen_blkif *blkif,
                               struct blkif_request *req)
{
        int err = 0;
        int status = BLKIF_RSP_OKAY;
        struct block_device *bdev = blkif->vbd.bdev;
        unsigned long secure;

        blkif->st_ds_req++;

        xen_blkif_get(blkif);
        secure = (blkif->vbd.discard_secure &&
                  (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
                 BLKDEV_DISCARD_SECURE : 0;

        err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
                                   req->u.discard.nr_sectors,
                                   GFP_KERNEL, secure);
        if (err == -EOPNOTSUPP) {
                pr_debug(DRV_PFX "discard op failed, not supported\n");
                status = BLKIF_RSP_EOPNOTSUPP;
        } else if (err)
                status = BLKIF_RSP_ERROR;

        make_response(blkif, req->u.discard.id, req->operation, status);
        xen_blkif_put(blkif);
        return err;
}

static int dispatch_other_io(struct xen_blkif *blkif,
                             struct blkif_request *req,
                             struct pending_req *pending_req)
{
        free_req(blkif, pending_req);
        make_response(blkif, req->u.other.id, req->operation,
                      BLKIF_RSP_EOPNOTSUPP);
        return -EIO;
}
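
/*
 * Wait for all in-flight I/O on this backend to complete. Used before
 * issuing a WRITE_BARRIER: the refcount drops back to 2 (the base reference
 * plus the one held by the xen_blkif_schedule thread) once no requests
 * remain outstanding.
 */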
static void xen_blk_drain_io(struct xen_blkif *blkif)
{
        atomic_set(&blkif->drain, 1);
        do {
                /* The initial value is one, and one refcnt taken at the
                 * start of the xen_blkif_schedule thread. */
                if (atomic_read(&blkif->refcnt) <= 2)
                        break;
                wait_for_completion_interruptible_timeout(
                                &blkif->drain_complete, HZ);

                if (!atomic_read(&blkif->drain))
                        break;
        } while (!kthread_should_stop());
        atomic_set(&blkif->drain, 0);
}

/*
 * Completion callback on the bio's. Called as bio->bi_end_io().
 */
static void __end_block_io_op(struct pending_req *pending_req, int error)
{
        /* An error fails the entire request. */
        if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
            (error == -EOPNOTSUPP)) {
                pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
                xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
        } else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
                   (error == -EOPNOTSUPP)) {
                pr_debug(DRV_PFX "write barrier op failed, not supported\n");
                xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
        } else if (error) {
                pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
                         " error=%d\n", error);
                pending_req->status = BLKIF_RSP_ERROR;
        }

        /*
         * If all of the bio's have completed it is time to unmap
         * the grant references associated with 'request' and provide
         * the proper response on the ring.
         */
        if (atomic_dec_and_test(&pending_req->pendcnt)) {
                xen_blkbk_unmap(pending_req);
                make_response(pending_req->blkif, pending_req->id,
                              pending_req->operation, pending_req->status);
                xen_blkif_put(pending_req->blkif);
                if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
                        if (atomic_read(&pending_req->blkif->drain))
                                complete(&pending_req->blkif->drain_complete);
                }
                free_req(pending_req->blkif, pending_req);
        }
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio, int error)
{
        __end_block_io_op(bio->bi_private, error);
        bio_put(bio);
}
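
/*
 * Ring consumption sketch: req_cons is our private consumer index and
 * sring->req_prod is the frontend's producer index; the rmb() below pairs
 * with the frontend's producer barrier so request contents are visible
 * before we consume up to 'rp'. Each request is copied out of the shared
 * ring before any of its fields are trusted or used.
 */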

/*
 * Function to copy the 'struct blkif_request' from the ring buffer
 * (which has the sectors we want, number of them, grant references, etc),
 * and transmute it to the block API to hand it over to the proper block disk.
 */
static int
__do_block_io_op(struct xen_blkif *blkif)
{
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        struct blkif_request req;
        struct pending_req *pending_req;
        RING_IDX rc, rp;
        int more_to_do = 0;

        rc = blk_rings->common.req_cons;
        rp = blk_rings->common.sring->req_prod;
        rmb(); /* Ensure we see queued requests up to 'rp'. */

        while (rc != rp) {

                if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
                        break;

                if (kthread_should_stop()) {
                        more_to_do = 1;
                        break;
                }

                pending_req = alloc_req(blkif);
                if (NULL == pending_req) {
                        blkif->st_oo_req++;
                        more_to_do = 1;
                        break;
                }

                switch (blkif->blk_protocol) {
                case BLKIF_PROTOCOL_NATIVE:
                        memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
                        break;
                case BLKIF_PROTOCOL_X86_32:
                        blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
                        break;
                case BLKIF_PROTOCOL_X86_64:
                        blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
                        break;
                default:
                        /* A protocol we never negotiated: bail out. */
                        BUG();
                }
                blk_rings->common.req_cons = ++rc; /* before make_response() */

                /* Apply all sanity checks to /private copy/ of request. */
                barrier();

                switch (req.operation) {
                case BLKIF_OP_READ:
                case BLKIF_OP_WRITE:
                case BLKIF_OP_WRITE_BARRIER:
                case BLKIF_OP_FLUSH_DISKCACHE:
                        if (dispatch_rw_block_io(blkif, &req, pending_req))
                                goto done;
                        break;
                case BLKIF_OP_DISCARD:
                        free_req(blkif, pending_req);
                        if (dispatch_discard_io(blkif, &req))
                                goto done;
                        break;
                default:
                        if (dispatch_other_io(blkif, &req, pending_req))
                                goto done;
                        break;
                }

                /* Yield point for this unbounded loop. */
                cond_resched();
        }
done:
        return more_to_do;
}

static int
do_block_io_op(struct xen_blkif *blkif)
{
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        int more_to_do;

        do {
                more_to_do = __do_block_io_op(blkif);
                if (more_to_do)
                        break;

                RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
        } while (more_to_do);

        return more_to_do;
}

/*
 * Transmute the 'struct blkif_request' to a proper 'struct bio'
 * and call 'submit_bio' to pass it to the underlying storage.
 */
static int dispatch_rw_block_io(struct xen_blkif *blkif,
                                struct blkif_request *req,
                                struct pending_req *pending_req)
{
        struct phys_req preq;
        struct seg_buf seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int nseg;
        struct bio *bio = NULL;
        struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        int i, nbio = 0;
        int operation;
        struct blk_plug plug;
        bool drain = false;
        struct page **pages = pending_req->pages;

        switch (req->operation) {
        case BLKIF_OP_READ:
                blkif->st_rd_req++;
                operation = READ;
                break;
        case BLKIF_OP_WRITE:
                blkif->st_wr_req++;
                operation = WRITE_ODIRECT;
                break;
        case BLKIF_OP_WRITE_BARRIER:
                drain = true;
                /* fall through: a barrier is a draining flush */
        case BLKIF_OP_FLUSH_DISKCACHE:
                blkif->st_f_req++;
                operation = WRITE_FLUSH;
                break;
        default:
                operation = 0; /* make gcc happy */
                goto fail_response;
                break;
        }

        /* Check that the number of segments is sane. */
        nseg = req->u.rw.nr_segments;

        if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
            unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
                pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
                         nseg);
                /* Haven't submitted any bio's yet. */
                goto fail_response;
        }

        preq.sector_number = req->u.rw.sector_number;
        preq.nr_sects      = 0;

        pending_req->blkif     = blkif;
        pending_req->id        = req->u.rw.id;
        pending_req->operation = req->operation;
        pending_req->status    = BLKIF_RSP_OKAY;
        pending_req->nr_pages  = nseg;

        for (i = 0; i < nseg; i++) {
                seg[i].nsec = req->u.rw.seg[i].last_sect -
                        req->u.rw.seg[i].first_sect + 1;
                if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
                    (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
                        goto fail_response;
                preq.nr_sects += seg[i].nsec;
        }

        if (xen_vbd_translate(&preq, blkif, operation) != 0) {
                pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
                         operation == READ ? "read" : "write",
                         preq.sector_number,
                         preq.sector_number + preq.nr_sects,
                         blkif->vbd.pdevice);
                goto fail_response;
        }

        /*
         * This check _MUST_ be done after xen_vbd_translate as the
         * preq.bdev is set there.
         */
        for (i = 0; i < nseg; i++) {
                if (((int)preq.sector_number|(int)seg[i].nsec) &
                    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
                        pr_debug(DRV_PFX "Misaligned I/O request from domain %d\n",
                                 blkif->domid);
                        goto fail_response;
                }
        }

        /* Wait on all outstanding I/O's and once that has been completed
         * issue the WRITE_FLUSH.
         */
        if (drain)
                xen_blk_drain_io(pending_req->blkif);

        /*
         * If we have failed at this point, we need to undo the M2P override,
         * set gnttab_set_unmap_op on all of the grant references and perform
         * the hypercall to unmap the grants - that is all done in
         * xen_blkbk_unmap.
         */
        if (xen_blkbk_map(req, pending_req, seg, pages))
                goto fail_flush;

        /*
         * This corresponding xen_blkif_put is done in __end_block_io_op, or
         * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
         */
        xen_blkif_get(blkif);

        for (i = 0; i < nseg; i++) {
                while ((bio == NULL) ||
                       (bio_add_page(bio,
                                     pages[i],
                                     seg[i].nsec << 9,
                                     seg[i].offset) == 0)) {

                        bio = bio_alloc(GFP_KERNEL, nseg-i);
                        if (unlikely(bio == NULL))
                                goto fail_put_bio;

                        biolist[nbio++] = bio;
                        bio->bi_bdev    = preq.bdev;
                        bio->bi_private = pending_req;
                        bio->bi_end_io  = end_block_io_op;
                        bio->bi_sector  = preq.sector_number;
                }

                preq.sector_number += seg[i].nsec;
        }

        /* This will be hit if the operation was a flush or discard. */
        if (!bio) {
                BUG_ON(operation != WRITE_FLUSH);

                bio = bio_alloc(GFP_KERNEL, 0);
                if (unlikely(bio == NULL))
                        goto fail_put_bio;

                biolist[nbio++] = bio;
                bio->bi_bdev    = preq.bdev;
                bio->bi_private = pending_req;
                bio->bi_end_io  = end_block_io_op;
        }

        atomic_set(&pending_req->pendcnt, nbio);
        blk_start_plug(&plug);

        for (i = 0; i < nbio; i++)
                submit_bio(operation, biolist[i]);

        /* Let the I/Os go.. */
        blk_finish_plug(&plug);

        if (operation == READ)
                blkif->st_rd_sect += preq.nr_sects;
        else if (operation & WRITE)
                blkif->st_wr_sect += preq.nr_sects;

        return 0;

 fail_flush:
        xen_blkbk_unmap(pending_req);
 fail_response:
        /* Haven't submitted any bio's yet. */
        make_response(blkif, req->u.rw.id, req->operation, BLKIF_RSP_ERROR);
        free_req(blkif, pending_req);
        msleep(1); /* back off a bit */
        return -EIO;

 fail_put_bio:
        for (i = 0; i < nbio; i++)
                bio_put(biolist[i]);
        atomic_set(&pending_req->pendcnt, 1);
        __end_block_io_op(pending_req, -EINVAL);
        msleep(1); /* back off a bit */
        return -EIO;
}

/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct xen_blkif *blkif, u64 id,
                          unsigned short op, int st)
{
        struct blkif_response  resp;
        unsigned long     flags;
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        int notify;

        resp.id        = id;
        resp.operation = op;
        resp.status    = st;

        spin_lock_irqsave(&blkif->blk_ring_lock, flags);
        /* Place on the response ring for the relevant domain. */
        switch (blkif->blk_protocol) {
        case BLKIF_PROTOCOL_NATIVE:
                memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        case BLKIF_PROTOCOL_X86_32:
                memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        case BLKIF_PROTOCOL_X86_64:
                memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        default:
                BUG();
        }
        blk_rings->common.rsp_prod_pvt++;
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
        spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
        if (notify)
                notify_remote_via_irq(blkif->irq);
}

static int __init xen_blkif_init(void)
{
        int rc = 0;

        if (!xen_domain())
                return -ENODEV;

        rc = xen_blkif_interface_init();
        if (rc)
                goto failed_init;

        rc = xen_blkif_xenbus_init();
        if (rc)
                goto failed_init;

 failed_init:
        return rc;
}

module_init(xen_blkif_init);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");