blkback.c

/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/bitmap.h>

#include <xen/events.h>
#include <xen/page.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#include "common.h"
/*
 * Maximum number of unused free pages to keep in the internal buffer.
 * Setting this to a value too low will reduce memory used in each backend,
 * but can have a performance penalty.
 *
 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but it
 * can be set to a lower value at the cost of degraded performance on some
 * IO-intensive workloads.
 */

static int xen_blkif_max_buffer_pages = 1024;
module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
"Maximum number of free pages to keep in each block backend buffer");
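
/*
 * Since the parameter is created with mode 0644 it can be changed at
 * runtime through sysfs, e.g. (the value here is purely illustrative):
 *   echo 512 > /sys/module/blkback/parameters/max_buffer_pages
 */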
/*
 * Maximum number of grants to map persistently in blkback. For maximum
 * performance this should be the total number of grants that can be used
 * to fill the ring, but since this might become too high, especially with
 * the use of indirect descriptors, we set it to a value that provides good
 * performance without using too much memory.
 *
 * When the list of persistent grants is full we clean it up using a LRU
 * algorithm.
 */

static int xen_blkif_max_pgrants = 1056;
module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
MODULE_PARM_DESC(max_persistent_grants,
                 "Maximum number of grants to map persistently");

/*
 * The LRU mechanism to clean the lists of persistent grants needs to
 * be executed periodically. The time interval between consecutive executions
 * of the purge mechanism is set in ms.
 */
#define LRU_INTERVAL 100

/*
 * When the persistent grants list is full we will remove unused grants
 * from the list. This is the percentage of grants to be removed at each
 * LRU execution.
 */
#define LRU_PERCENT_CLEAN 5
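
/*
 * Worked example with the defaults: xen_blkif_max_pgrants = 1056 and
 * LRU_PERCENT_CLEAN = 5, so purge_persistent_gnt() below tries to clean
 * (1056 / 100) * 5 = 50 grants per pass (integer division), plus whatever
 * the tree currently holds above the 1056 limit.
 */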
/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, int, 0644);

#define BLKBACK_INVALID_HANDLE (~0)

/* Number of free pages to remove on each call to free_xenballooned_pages */
#define NUM_BATCH_FREE_PAGES 10
static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
{
	unsigned long flags;

	spin_lock_irqsave(&blkif->free_pages_lock, flags);
	if (list_empty(&blkif->free_pages)) {
		BUG_ON(blkif->free_pages_num != 0);
		spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
		return alloc_xenballooned_pages(1, page, false);
	}
	BUG_ON(blkif->free_pages_num == 0);
	page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
	list_del(&page[0]->lru);
	blkif->free_pages_num--;
	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);

	return 0;
}

static inline void put_free_pages(struct xen_blkif *blkif, struct page **page,
				  int num)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&blkif->free_pages_lock, flags);
	for (i = 0; i < num; i++)
		list_add(&page[i]->lru, &blkif->free_pages);
	blkif->free_pages_num += num;
	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
}

static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
{
	/* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
	struct page *page[NUM_BATCH_FREE_PAGES];
	unsigned int num_pages = 0;
	unsigned long flags;

	spin_lock_irqsave(&blkif->free_pages_lock, flags);
	while (blkif->free_pages_num > num) {
		BUG_ON(list_empty(&blkif->free_pages));
		page[num_pages] = list_first_entry(&blkif->free_pages,
						   struct page, lru);
		list_del(&page[num_pages]->lru);
		blkif->free_pages_num--;
		if (++num_pages == NUM_BATCH_FREE_PAGES) {
			spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
			free_xenballooned_pages(num_pages, page);
			spin_lock_irqsave(&blkif->free_pages_lock, flags);
			num_pages = 0;
		}
	}
	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
	if (num_pages != 0)
		free_xenballooned_pages(num_pages, page);
}
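
/*
 * The pool above recycles pages obtained from alloc_xenballooned_pages()
 * so that mapping a grant does not have to go through the balloon driver
 * on every request; excess pages are handed back in batches of
 * NUM_BATCH_FREE_PAGES, dropping the lock around each batch.
 */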
#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))

static int do_block_io_op(struct xen_blkif *blkif);
static int dispatch_rw_block_io(struct xen_blkif *blkif,
				struct blkif_request *req,
				struct pending_req *pending_req);
static void make_response(struct xen_blkif *blkif, u64 id,
			  unsigned short op, int st);

#define foreach_grant_safe(pos, n, rbtree, node) \
	for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
	     &(pos)->node != NULL; \
	     (pos) = container_of(n, typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
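
/*
 * foreach_grant_safe() is the rb_tree analogue of
 * list_for_each_entry_safe(): @n caches the next node before the loop body
 * runs, so the body may rb_erase() and free the current entry while
 * iterating.
 */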
/*
 * We don't need locking around the persistent grant helpers
 * because blkback uses a single thread for each backend, so we
 * can be sure that these functions will never be called recursively.
 *
 * The only exception to that is put_persistent_gnt, which can be called
 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
 * bit operations to modify the flags of a persistent grant and to count
 * the number of used grants.
 */
static int add_persistent_gnt(struct xen_blkif *blkif,
			      struct persistent_gnt *persistent_gnt)
{
	struct rb_node **new = NULL, *parent = NULL;
	struct persistent_gnt *this;

	if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) {
		if (!blkif->vbd.overflow_max_grants)
			blkif->vbd.overflow_max_grants = 1;
		return -EBUSY;
	}
	/* Figure out where to put new node */
	new = &blkif->persistent_gnts.rb_node;
	while (*new) {
		this = container_of(*new, struct persistent_gnt, node);

		parent = *new;
		if (persistent_gnt->gnt < this->gnt)
			new = &((*new)->rb_left);
		else if (persistent_gnt->gnt > this->gnt)
			new = &((*new)->rb_right);
		else {
			pr_alert_ratelimited(DRV_PFX " trying to add a gref that's already in the tree\n");
			return -EINVAL;
		}
	}

	bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
	set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
	/* Add new node and rebalance tree. */
	rb_link_node(&(persistent_gnt->node), parent, new);
	rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts);
	blkif->persistent_gnt_c++;
	atomic_inc(&blkif->persistent_gnt_in_use);
	return 0;
}
static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif,
						 grant_ref_t gref)
{
	struct persistent_gnt *data;
	struct rb_node *node = NULL;

	node = blkif->persistent_gnts.rb_node;
	while (node) {
		data = container_of(node, struct persistent_gnt, node);

		if (gref < data->gnt)
			node = node->rb_left;
		else if (gref > data->gnt)
			node = node->rb_right;
		else {
			if (test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
				pr_alert_ratelimited(DRV_PFX " requesting a grant already in use\n");
				return NULL;
			}
			set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
			atomic_inc(&blkif->persistent_gnt_in_use);
			return data;
		}
	}
	return NULL;
}

static void put_persistent_gnt(struct xen_blkif *blkif,
			       struct persistent_gnt *persistent_gnt)
{
	if (!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
		pr_alert_ratelimited(DRV_PFX " freeing a grant already unused\n");
	set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
	clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
	atomic_dec(&blkif->persistent_gnt_in_use);
}
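
/*
 * Flag lifecycle: get_persistent_gnt() sets PERSISTENT_GNT_ACTIVE while a
 * request is using the grant; put_persistent_gnt() clears it and sets
 * PERSISTENT_GNT_WAS_ACTIVE, which the LRU purge below uses to prefer
 * evicting grants that have been idle since the previous purge.
 */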
static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
				 unsigned int num)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
	struct rb_node *n;
	int ret = 0;
	int segs_to_unmap = 0;

	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);
		gnttab_set_unmap_op(&unmap[segs_to_unmap],
			(unsigned long) pfn_to_kaddr(page_to_pfn(
				persistent_gnt->page)),
			GNTMAP_host_map,
			persistent_gnt->handle);

		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
		    !rb_next(&persistent_gnt->node)) {
			ret = gnttab_unmap_refs(unmap, NULL, pages,
						segs_to_unmap);
			BUG_ON(ret);
			put_free_pages(blkif, pages, segs_to_unmap);
			segs_to_unmap = 0;
		}

		rb_erase(&persistent_gnt->node, root);
		kfree(persistent_gnt);
		num--;
	}
	BUG_ON(num != 0);
}
static void unmap_purged_grants(struct work_struct *work)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
	int ret, segs_to_unmap = 0;
	struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);

	while (!list_empty(&blkif->persistent_purge_list)) {
		persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
						  struct persistent_gnt,
						  remove_node);
		list_del(&persistent_gnt->remove_node);

		gnttab_set_unmap_op(&unmap[segs_to_unmap],
			vaddr(persistent_gnt->page),
			GNTMAP_host_map,
			persistent_gnt->handle);

		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
			ret = gnttab_unmap_refs(unmap, NULL, pages,
						segs_to_unmap);
			BUG_ON(ret);
			put_free_pages(blkif, pages, segs_to_unmap);
			segs_to_unmap = 0;
		}
		kfree(persistent_gnt);
	}
	if (segs_to_unmap > 0) {
		ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
		BUG_ON(ret);
		put_free_pages(blkif, pages, segs_to_unmap);
	}
}
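
/*
 * LRU purge policy: a first pass drops only grants that are neither
 * ACTIVE nor WAS_ACTIVE (i.e. idle across two purge intervals); if that
 * does not free enough, a second pass (scan_used) also drops grants that
 * were used since the last purge. The unmapping itself is deferred to
 * unmap_purged_grants() on a workqueue.
 */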
static void purge_persistent_gnt(struct xen_blkif *blkif)
{
	struct persistent_gnt *persistent_gnt;
	struct rb_node *n;
	unsigned int num_clean, total;
	bool scan_used = false;
	struct rb_root *root;

	if (blkif->persistent_gnt_c < xen_blkif_max_pgrants ||
	    (blkif->persistent_gnt_c == xen_blkif_max_pgrants &&
	    !blkif->vbd.overflow_max_grants)) {
		return;
	}

	if (work_pending(&blkif->persistent_purge_work)) {
		pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n");
		return;
	}

	num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
	num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
	num_clean = min(blkif->persistent_gnt_c, num_clean);
	if (num_clean >
	    (blkif->persistent_gnt_c -
	    atomic_read(&blkif->persistent_gnt_in_use)))
		return;

	/*
	 * At this point, we can be sure that there will be no calls
	 * to get_persistent_gnt (because we are executing this code from
	 * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
	 * which means that the number of currently used grants will go down,
	 * but never up, so we will always be able to remove the requested
	 * number of grants.
	 */

	total = num_clean;

	pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);

	INIT_LIST_HEAD(&blkif->persistent_purge_list);
	root = &blkif->persistent_gnts;
purge_list:
	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);

		if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
			continue;
		if (!scan_used &&
		    (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
			continue;

		rb_erase(&persistent_gnt->node, root);
		list_add(&persistent_gnt->remove_node,
			 &blkif->persistent_purge_list);
		if (--num_clean == 0)
			goto finished;
	}
	/*
	 * If we get here it means we also need to start cleaning
	 * grants that were used since the last purge in order to cope
	 * with the requested number.
	 */
	if (!scan_used) {
		pr_debug(DRV_PFX "Still missing %u purged frames\n", num_clean);
		scan_used = true;
		goto purge_list;
	}
finished:
	/* Remove the "used" flag from all the persistent grants */
	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);
		clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
	}
	blkif->persistent_gnt_c -= (total - num_clean);
	blkif->vbd.overflow_max_grants = 0;

	/* We can defer this work */
	INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants);
	schedule_work(&blkif->persistent_purge_work);
	pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
	return;
}
/*
 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
 */
static struct pending_req *alloc_req(struct xen_blkif *blkif)
{
	struct pending_req *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&blkif->pending_free_lock, flags);
	if (!list_empty(&blkif->pending_free)) {
		req = list_entry(blkif->pending_free.next, struct pending_req,
				 free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
	return req;
}

/*
 * Return the 'pending_req' structure back to the freepool. We also
 * wake up the thread if it was waiting for a free structure.
 */
static void free_req(struct xen_blkif *blkif, struct pending_req *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&blkif->pending_free_lock, flags);
	was_empty = list_empty(&blkif->pending_free);
	list_add(&req->free_list, &blkif->pending_free);
	spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
	if (was_empty)
		wake_up(&blkif->pending_free_wq);
}
/*
 * Routines for managing virtual block devices (vbds).
 */
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
			     int operation)
{
	struct xen_vbd *vbd = &blkif->vbd;
	int rc = -EACCES;

	if ((operation != READ) && vbd->readonly)
		goto out;

	if (likely(req->nr_sects)) {
		blkif_sector_t end = req->sector_number + req->nr_sects;

		if (unlikely(end < req->sector_number))
			goto out;
		if (unlikely(end > vbd_sz(vbd)))
			goto out;
	}

	req->dev  = vbd->pdevice;
	req->bdev = vbd->bdev;
	rc = 0;

 out:
	return rc;
}
static void xen_vbd_resize(struct xen_blkif *blkif)
{
	struct xen_vbd *vbd = &blkif->vbd;
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
	unsigned long long new_size = vbd_sz(vbd);

	pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n",
		blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
	pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size);
	vbd->size = new_size;
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		pr_warn(DRV_PFX "Error starting transaction\n");
		return;
	}
	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(vbd));
	if (err) {
		pr_warn(DRV_PFX "Error writing new size\n");
		goto abort;
	}
	/*
	 * Write the current state; we will use this to synchronize
	 * the front-end. If the current state is "connected" the
	 * front-end will get the new size information online.
	 */
	err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
	if (err) {
		pr_warn(DRV_PFX "Error writing the state\n");
		goto abort;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		pr_warn(DRV_PFX "Error ending transaction\n");
	return;
abort:
	xenbus_transaction_end(xbt, 1);
}
/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct xen_blkif *blkif)
{
	blkif->waiting_reqs = 1;
	wake_up(&blkif->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}

/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct xen_blkif *blkif)
{
	pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
		 " | ds %4llu | pg: %4u/%4d\n",
		 current->comm, blkif->st_oo_req,
		 blkif->st_rd_req, blkif->st_wr_req,
		 blkif->st_f_req, blkif->st_ds_req,
		 blkif->persistent_gnt_c,
		 xen_blkif_max_pgrants);
	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	blkif->st_rd_req = 0;
	blkif->st_wr_req = 0;
	blkif->st_oo_req = 0;
	blkif->st_ds_req = 0;
}
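
/*
 * Main per-backend kernel thread. Each iteration waits up to LRU_INTERVAL
 * ms for the frontend to kick the ring and for a free pending_req, drains
 * the ring, and then does the periodic housekeeping: purging the
 * persistent-grant LRU and shrinking the free page pool back down to
 * xen_blkif_max_buffer_pages.
 */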
int xen_blkif_schedule(void *arg)
{
	struct xen_blkif *blkif = arg;
	struct xen_vbd *vbd = &blkif->vbd;
	unsigned long timeout;

	xen_blkif_get(blkif);

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
		if (unlikely(vbd->size != vbd_sz(vbd)))
			xen_vbd_resize(blkif);

		timeout = msecs_to_jiffies(LRU_INTERVAL);

		timeout = wait_event_interruptible_timeout(
			blkif->wq,
			blkif->waiting_reqs || kthread_should_stop(),
			timeout);
		if (timeout == 0)
			goto purge_gnt_list;
		timeout = wait_event_interruptible_timeout(
			blkif->pending_free_wq,
			!list_empty(&blkif->pending_free) ||
			kthread_should_stop(),
			timeout);
		if (timeout == 0)
			goto purge_gnt_list;

		blkif->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		if (do_block_io_op(blkif))
			blkif->waiting_reqs = 1;

purge_gnt_list:
		if (blkif->vbd.feature_gnt_persistent &&
		    time_after(jiffies, blkif->next_lru)) {
			purge_persistent_gnt(blkif);
			blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
		}

		/* Shrink if we have more than xen_blkif_max_buffer_pages */
		shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages);

		if (log_stats && time_after(jiffies, blkif->st_print))
			print_stats(blkif);
	}

	/* Since we are shutting down remove all pages from the buffer */
	shrink_free_pagepool(blkif, 0 /* All */);

	/* Free all persistent grant pages */
	if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
		free_persistent_gnts(blkif, &blkif->persistent_gnts,
			blkif->persistent_gnt_c);

	BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
	blkif->persistent_gnt_c = 0;

	if (log_stats)
		print_stats(blkif);

	blkif->xenblkd = NULL;
	xen_blkif_put(blkif);

	return 0;
}
/*
 * Unmap the grant references, and also remove the M2P overrides
 * used in the 'pending_req'.
 */
static void xen_blkbk_unmap(struct xen_blkif *blkif,
			    struct grant_page *pages[],
			    int num)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int i, invcount = 0;
	int ret;

	for (i = 0; i < num; i++) {
		if (pages[i]->persistent_gnt != NULL) {
			put_persistent_gnt(blkif, pages[i]->persistent_gnt);
			continue;
		}
		if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
			continue;
		unmap_pages[invcount] = pages[i]->page;
		gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]->page),
				    GNTMAP_host_map, pages[i]->handle);
		pages[i]->handle = BLKBACK_INVALID_HANDLE;
		if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
						invcount);
			BUG_ON(ret);
			put_free_pages(blkif, unmap_pages, invcount);
			invcount = 0;
		}
	}
	if (invcount) {
		ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
		BUG_ON(ret);
		put_free_pages(blkif, unmap_pages, invcount);
	}
}
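
/*
 * Map the grant references of a request into this domain, reusing
 * already-mapped persistent grants where possible. Grants are mapped in
 * batches of at most BLKIF_MAX_SEGMENTS_PER_REQUEST, hence the again:
 * loop when an indirect request carries more segments than that.
 */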
static int xen_blkbk_map(struct xen_blkif *blkif,
			 struct grant_page *pages[],
			 int num, bool ro)
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt = NULL;
	phys_addr_t addr = 0;
	int i, seg_idx, new_map_idx;
	int segs_to_map = 0;
	int ret = 0;
	int last_map = 0, map_until = 0;
	int use_persistent_gnts;

	use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);

	/*
	 * Fill out map[..] with the PFN of the page in our domain and the
	 * corresponding grant reference for each page that needs mapping.
	 */
again:
	for (i = map_until; i < num; i++) {
		uint32_t flags;

		if (use_persistent_gnts)
			persistent_gnt = get_persistent_gnt(
				blkif,
				pages[i]->gref);

		if (persistent_gnt) {
			/*
			 * We are using persistent grants and
			 * the grant is already mapped
			 */
			pages[i]->page = persistent_gnt->page;
			pages[i]->persistent_gnt = persistent_gnt;
		} else {
			if (get_free_page(blkif, &pages[i]->page))
				goto out_of_memory;
			addr = vaddr(pages[i]->page);
			pages_to_gnt[segs_to_map] = pages[i]->page;
			pages[i]->persistent_gnt = NULL;
			flags = GNTMAP_host_map;
			if (!use_persistent_gnts && ro)
				flags |= GNTMAP_readonly;
			gnttab_set_map_op(&map[segs_to_map++], addr,
					  flags, pages[i]->gref,
					  blkif->domid);
		}
		map_until = i + 1;
		if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
			break;
	}

	if (segs_to_map) {
		ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
		BUG_ON(ret);
	}

	/*
	 * Now swizzle the MFN in our domain with the MFN from the other domain
	 * so that when we access vaddr(pending_req,i) it has the contents of
	 * the page from the other domain.
	 */
	for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
		if (!pages[seg_idx]->persistent_gnt) {
			/* This is a newly mapped grant */
			BUG_ON(new_map_idx >= segs_to_map);
			if (unlikely(map[new_map_idx].status != 0)) {
				pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
				pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
				ret |= 1;
				goto next;
			}
			pages[seg_idx]->handle = map[new_map_idx].handle;
		} else {
			continue;
		}
		if (use_persistent_gnts &&
		    blkif->persistent_gnt_c < xen_blkif_max_pgrants) {
			/*
			 * We are using persistent grants, the grant is
			 * not mapped but we might have room for it.
			 */
			persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
						 GFP_KERNEL);
			if (!persistent_gnt) {
				/*
				 * If we don't have enough memory to
				 * allocate the persistent_gnt struct
				 * map this grant non-persistently
				 */
				goto next;
			}
			persistent_gnt->gnt = map[new_map_idx].ref;
			persistent_gnt->handle = map[new_map_idx].handle;
			persistent_gnt->page = pages[seg_idx]->page;
			if (add_persistent_gnt(blkif,
					       persistent_gnt)) {
				kfree(persistent_gnt);
				persistent_gnt = NULL;
				goto next;
			}
			pages[seg_idx]->persistent_gnt = persistent_gnt;
			pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
				 persistent_gnt->gnt, blkif->persistent_gnt_c,
				 xen_blkif_max_pgrants);
			goto next;
		}
		if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
			blkif->vbd.overflow_max_grants = 1;
			pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
				 blkif->domid, blkif->vbd.handle);
		}
		/*
		 * We could not map this grant persistently, so use it as
		 * a non-persistent grant.
		 */
next:
		new_map_idx++;
	}
	segs_to_map = 0;
	last_map = map_until;
	if (map_until != num)
		goto again;

	return ret;

out_of_memory:
	pr_alert(DRV_PFX "%s: out of memory\n", __func__);
	put_free_pages(blkif, pages_to_gnt, segs_to_map);
	return -ENOMEM;
}
static int xen_blkbk_map_seg(struct pending_req *pending_req)
{
	int rc;

	rc = xen_blkbk_map(pending_req->blkif, pending_req->segments,
			   pending_req->nr_pages,
			   (pending_req->operation != BLKIF_OP_READ));

	return rc;
}
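
/*
 * Indirect requests keep their segments in separate grant-mapped pages
 * rather than inline in the ring slot: each indirect page holds up to
 * SEGS_PER_INDIRECT_FRAME struct blkif_request_segment_aligned entries,
 * so a request with nseg segments needs INDIRECT_PAGES(nseg) extra
 * grants, which are mapped and parsed below.
 */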
static int xen_blkbk_parse_indirect(struct blkif_request *req,
				    struct pending_req *pending_req,
				    struct seg_buf seg[],
				    struct phys_req *preq)
{
	struct grant_page **pages = pending_req->indirect_pages;
	struct xen_blkif *blkif = pending_req->blkif;
	int indirect_grefs, rc, n, nseg, i;
	struct blkif_request_segment_aligned *segments = NULL;

	nseg = pending_req->nr_pages;
	indirect_grefs = INDIRECT_PAGES(nseg);
	BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

	for (i = 0; i < indirect_grefs; i++)
		pages[i]->gref = req->u.indirect.indirect_grefs[i];

	rc = xen_blkbk_map(blkif, pages, indirect_grefs, true);
	if (rc)
		goto unmap;

	for (n = 0, i = 0; n < nseg; n++) {
		if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
			/* Map indirect segments */
			if (segments)
				kunmap_atomic(segments);
			segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
		}
		i = n % SEGS_PER_INDIRECT_FRAME;
		pending_req->segments[n]->gref = segments[i].gref;
		seg[n].nsec = segments[i].last_sect -
			segments[i].first_sect + 1;
		seg[n].offset = (segments[i].first_sect << 9);
		if ((segments[i].last_sect >= (PAGE_SIZE >> 9)) ||
		    (segments[i].last_sect < segments[i].first_sect)) {
			rc = -EINVAL;
			goto unmap;
		}
		preq->nr_sects += seg[n].nsec;
	}

unmap:
	if (segments)
		kunmap_atomic(segments);
	xen_blkbk_unmap(blkif, pages, indirect_grefs);
	return rc;
}
static int dispatch_discard_io(struct xen_blkif *blkif,
			       struct blkif_request *req)
{
	int err = 0;
	int status = BLKIF_RSP_OKAY;
	struct block_device *bdev = blkif->vbd.bdev;
	unsigned long secure;
	struct phys_req preq;

	preq.sector_number = req->u.discard.sector_number;
	preq.nr_sects      = req->u.discard.nr_sectors;

	err = xen_vbd_translate(&preq, blkif, WRITE);
	if (err) {
		pr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\n",
			preq.sector_number,
			preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
		goto fail_response;
	}
	blkif->st_ds_req++;

	xen_blkif_get(blkif);
	secure = (blkif->vbd.discard_secure &&
		 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
		 BLKDEV_DISCARD_SECURE : 0;

	err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
				   req->u.discard.nr_sectors,
				   GFP_KERNEL, secure);
fail_response:
	if (err == -EOPNOTSUPP) {
		pr_debug(DRV_PFX "discard op failed, not supported\n");
		status = BLKIF_RSP_EOPNOTSUPP;
	} else if (err)
		status = BLKIF_RSP_ERROR;

	make_response(blkif, req->u.discard.id, req->operation, status);
	xen_blkif_put(blkif);
	return err;
}
static int dispatch_other_io(struct xen_blkif *blkif,
			     struct blkif_request *req,
			     struct pending_req *pending_req)
{
	free_req(blkif, pending_req);
	make_response(blkif, req->u.other.id, req->operation,
		      BLKIF_RSP_EOPNOTSUPP);
	return -EIO;
}

static void xen_blk_drain_io(struct xen_blkif *blkif)
{
	atomic_set(&blkif->drain, 1);
	do {
		/*
		 * The initial value is one, and one refcnt taken at the
		 * start of the xen_blkif_schedule thread.
		 */
		if (atomic_read(&blkif->refcnt) <= 2)
			break;
		wait_for_completion_interruptible_timeout(
				&blkif->drain_complete, HZ);

		if (!atomic_read(&blkif->drain))
			break;
	} while (!kthread_should_stop());
	atomic_set(&blkif->drain, 0);
}
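
/*
 * refcnt <= 2 above means only the initial reference and the one held by
 * the xen_blkif_schedule thread remain, i.e. every in-flight request
 * (each of which takes a reference in the dispatch path) has completed
 * and dropped its reference, so the queue is drained.
 */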
/*
 * Completion callback on the bios. Called as bh->b_end_io().
 */
static void __end_block_io_op(struct pending_req *pending_req, int error)
{
	/* An error fails the entire request. */
	if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
	    (error == -EOPNOTSUPP)) {
		pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
		   (error == -EOPNOTSUPP)) {
		pr_debug(DRV_PFX "write barrier op failed, not supported\n");
		xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
		pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
			 " error=%d\n", error);
		pending_req->status = BLKIF_RSP_ERROR;
	}

	/*
	 * If all of the bios have completed it is time to unmap
	 * the grant references associated with 'request' and provide
	 * the proper response on the ring.
	 */
	if (atomic_dec_and_test(&pending_req->pendcnt)) {
		xen_blkbk_unmap(pending_req->blkif,
				pending_req->segments,
				pending_req->nr_pages);
		make_response(pending_req->blkif, pending_req->id,
			      pending_req->operation, pending_req->status);
		xen_blkif_put(pending_req->blkif);
		if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
			if (atomic_read(&pending_req->blkif->drain))
				complete(&pending_req->blkif->drain_complete);
		}
		free_req(pending_req->blkif, pending_req);
	}
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio, int error)
{
	__end_block_io_op(bio->bi_private, error);
	bio_put(bio);
}
/*
 * Function to copy the 'struct blkif_request' from the ring buffer
 * (which has the sectors we want, number of them, grant references, etc.)
 * and transmute it to the block API to hand it over to the proper block disk.
 */
static int
__do_block_io_op(struct xen_blkif *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	struct blkif_request req;
	struct pending_req *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

	while (rc != rp) {

		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;

		if (kthread_should_stop()) {
			more_to_do = 1;
			break;
		}

		pending_req = alloc_req(blkif);
		if (NULL == pending_req) {
			blkif->st_oo_req++;
			more_to_do = 1;
			break;
		}

		switch (blkif->blk_protocol) {
		case BLKIF_PROTOCOL_NATIVE:
			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
			break;
		case BLKIF_PROTOCOL_X86_32:
			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
			break;
		case BLKIF_PROTOCOL_X86_64:
			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
			break;
		default:
			BUG();
		}
		blk_rings->common.req_cons = ++rc; /* before make_response() */

		/* Apply all sanity checks to /private copy/ of request. */
		barrier();

		switch (req.operation) {
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
		case BLKIF_OP_WRITE_BARRIER:
		case BLKIF_OP_FLUSH_DISKCACHE:
		case BLKIF_OP_INDIRECT:
			if (dispatch_rw_block_io(blkif, &req, pending_req))
				goto done;
			break;
		case BLKIF_OP_DISCARD:
			free_req(blkif, pending_req);
			if (dispatch_discard_io(blkif, &req))
				goto done;
			break;
		default:
			if (dispatch_other_io(blkif, &req, pending_req))
				goto done;
			break;
		}

		/* Yield point for this unbounded loop. */
		cond_resched();
	}
done:
	return more_to_do;
}
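
/*
 * Outer loop around __do_block_io_op: RING_FINAL_CHECK_FOR_REQUESTS
 * re-checks the shared ring after we believe it is empty, closing the
 * race with a frontend that queues a request just as we finish, so a
 * notification is never missed.
 */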
static int
do_block_io_op(struct xen_blkif *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int more_to_do;

	do {
		more_to_do = __do_block_io_op(blkif);
		if (more_to_do)
			break;

		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
	} while (more_to_do);

	return more_to_do;
}
/*
 * Transmutation of the 'struct blkif_request' to a proper 'struct bio'
 * and call the 'submit_bio' to pass it to the underlying storage.
 */
static int dispatch_rw_block_io(struct xen_blkif *blkif,
				struct blkif_request *req,
				struct pending_req *pending_req)
{
	struct phys_req preq;
	struct seg_buf *seg = pending_req->seg;
	unsigned int nseg;
	struct bio *bio = NULL;
	struct bio **biolist = pending_req->biolist;
	int i, nbio = 0;
	int operation;
	struct blk_plug plug;
	bool drain = false;
	struct grant_page **pages = pending_req->segments;
	unsigned short req_operation;

	req_operation = req->operation == BLKIF_OP_INDIRECT ?
			req->u.indirect.indirect_op : req->operation;
	if ((req->operation == BLKIF_OP_INDIRECT) &&
	    (req_operation != BLKIF_OP_READ) &&
	    (req_operation != BLKIF_OP_WRITE)) {
		pr_debug(DRV_PFX "Invalid indirect operation (%u)\n",
			 req_operation);
		goto fail_response;
	}

	switch (req_operation) {
	case BLKIF_OP_READ:
		blkif->st_rd_req++;
		operation = READ;
		break;
	case BLKIF_OP_WRITE:
		blkif->st_wr_req++;
		operation = WRITE_ODIRECT;
		break;
	case BLKIF_OP_WRITE_BARRIER:
		drain = true;
		/* fall through */
	case BLKIF_OP_FLUSH_DISKCACHE:
		blkif->st_f_req++;
		operation = WRITE_FLUSH;
		break;
	default:
		operation = 0; /* make gcc happy */
		goto fail_response;
		break;
	}

	/* Check that the number of segments is sane. */
	nseg = req->operation == BLKIF_OP_INDIRECT ?
	       req->u.indirect.nr_segments : req->u.rw.nr_segments;

	if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
	    unlikely((req->operation != BLKIF_OP_INDIRECT) &&
		     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
	    unlikely((req->operation == BLKIF_OP_INDIRECT) &&
		     (nseg > MAX_INDIRECT_SEGMENTS))) {
		pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
			 nseg);
		/* Haven't submitted any bio's yet. */
		goto fail_response;
	}

	preq.nr_sects      = 0;

	pending_req->blkif     = blkif;
	pending_req->id        = req->u.rw.id;
	pending_req->operation = req_operation;
	pending_req->status    = BLKIF_RSP_OKAY;
	pending_req->nr_pages  = nseg;

	if (req->operation != BLKIF_OP_INDIRECT) {
		preq.dev               = req->u.rw.handle;
		preq.sector_number     = req->u.rw.sector_number;
		for (i = 0; i < nseg; i++) {
			pages[i]->gref = req->u.rw.seg[i].gref;
			seg[i].nsec = req->u.rw.seg[i].last_sect -
				req->u.rw.seg[i].first_sect + 1;
			seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
			if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
			    (req->u.rw.seg[i].last_sect <
			     req->u.rw.seg[i].first_sect))
				goto fail_response;
			preq.nr_sects += seg[i].nsec;
		}
	} else {
		preq.dev               = req->u.indirect.handle;
		preq.sector_number     = req->u.indirect.sector_number;
		if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
			goto fail_response;
	}
	if (xen_vbd_translate(&preq, blkif, operation) != 0) {
		pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
			 operation == READ ? "read" : "write",
			 preq.sector_number,
			 preq.sector_number + preq.nr_sects,
			 blkif->vbd.pdevice);
		goto fail_response;
	}

	/*
	 * This check _MUST_ be done after xen_vbd_translate as the
	 * preq.bdev is set there.
	 */
	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number|(int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			pr_debug(DRV_PFX "Misaligned I/O request from domain %d\n",
				 blkif->domid);
			goto fail_response;
		}
	}

	/*
	 * Wait on all outstanding I/O's and once that has been completed
	 * issue the WRITE_FLUSH.
	 */
	if (drain)
		xen_blk_drain_io(pending_req->blkif);

	/*
	 * If we have failed at this point, we need to undo the M2P override,
	 * set gnttab_set_unmap_op on all of the grant references and perform
	 * the hypercall to unmap the grants - that is all done in
	 * xen_blkbk_unmap.
	 */
	if (xen_blkbk_map_seg(pending_req))
		goto fail_flush;

	/*
	 * This corresponding xen_blkif_put is done in __end_block_io_op, or
	 * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
	 */
	xen_blkif_get(blkif);

	for (i = 0; i < nseg; i++) {
		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     pages[i]->page,
				     seg[i].nsec << 9,
				     seg[i].offset) == 0)) {

			bio = bio_alloc(GFP_KERNEL, nseg-i);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

			biolist[nbio++] = bio;
			bio->bi_bdev    = preq.bdev;
			bio->bi_private = pending_req;
			bio->bi_end_io  = end_block_io_op;
			bio->bi_sector  = preq.sector_number;
		}

		preq.sector_number += seg[i].nsec;
	}

	/* This will be hit if the operation was a flush or discard. */
	if (!bio) {
		BUG_ON(operation != WRITE_FLUSH);

		bio = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

		biolist[nbio++] = bio;
		bio->bi_bdev    = preq.bdev;
		bio->bi_private = pending_req;
		bio->bi_end_io  = end_block_io_op;
	}

	atomic_set(&pending_req->pendcnt, nbio);
	blk_start_plug(&plug);

	for (i = 0; i < nbio; i++)
		submit_bio(operation, biolist[i]);

	/* Let the I/Os go.. */
	blk_finish_plug(&plug);

	if (operation == READ)
		blkif->st_rd_sect += preq.nr_sects;
	else if (operation & WRITE)
		blkif->st_wr_sect += preq.nr_sects;

	return 0;

 fail_flush:
	xen_blkbk_unmap(blkif, pending_req->segments,
			pending_req->nr_pages);
 fail_response:
	/* Haven't submitted any bio's yet. */
	make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
	free_req(blkif, pending_req);
	msleep(1); /* back off a bit */
	return -EIO;

 fail_put_bio:
	for (i = 0; i < nbio; i++)
		bio_put(biolist[i]);
	atomic_set(&pending_req->pendcnt, 1);
	__end_block_io_op(pending_req, -EINVAL);
	msleep(1); /* back off a bit */
	return -EIO;
}
/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct xen_blkif *blkif, u64 id,
			  unsigned short op, int st)
{
	struct blkif_response  resp;
	unsigned long     flags;
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int notify;

	resp.id        = id;
	resp.operation = op;
	resp.status    = st;

	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
	/* Place on the response ring for the relevant domain. */
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_32:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_64:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	default:
		BUG();
	}
	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
	if (notify)
		notify_remote_via_irq(blkif->irq);
}
static int __init xen_blkif_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	rc = xen_blkif_interface_init();
	if (rc)
		goto failed_init;

	rc = xen_blkif_xenbus_init();
	if (rc)
		goto failed_init;

 failed_init:
	return rc;
}

module_init(xen_blkif_init);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");