blkback.c

/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/bitmap.h>

#include <xen/events.h>
#include <xen/page.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#include "common.h"

/*
 * Maximum number of unused free pages to keep in the internal buffer.
 * Setting this to a low value reduces the memory used by each backend,
 * but can incur a performance penalty.
 *
 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but it
 * can be set lower, at the risk of degraded performance on some
 * IO-intensive workloads.
 */
static int xen_blkif_max_buffer_pages = 704;
module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
                 "Maximum number of free pages to keep in each block backend buffer");

/*
 * Maximum number of grants to map persistently in blkback. For maximum
 * performance this should be the total number of grants that can be used
 * to fill the ring, but since this might become too high, especially with
 * the use of indirect descriptors, we set it to a value that provides good
 * performance without using too much memory.
 *
 * When the list of persistent grants is full we clean it up using an LRU
 * algorithm.
 */
static int xen_blkif_max_pgrants = 352;
module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
MODULE_PARM_DESC(max_persistent_grants,
                 "Maximum number of grants to map persistently");

/*
 * The LRU mechanism to clean the lists of persistent grants needs to
 * be executed periodically. The time interval between consecutive executions
 * of the purge mechanism is set in ms.
 */
#define LRU_INTERVAL 100

/*
 * When the persistent grants list is full we will remove unused grants
 * from the list. This is the percentage of grants to be removed at each
 * LRU execution.
 */
#define LRU_PERCENT_CLEAN 5

/* Run-time switchable: /sys/module/blkback/parameters/ */
static int log_stats;
module_param(log_stats, int, 0644);

#define BLKBACK_INVALID_HANDLE (~0)

/* Number of free pages to remove on each call to free_xenballooned_pages */
#define NUM_BATCH_FREE_PAGES 10
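
/*
 * Buffer-pool helpers. Pages used to map grants are kept in a per-backend
 * free list instead of being handed straight back to the balloon driver,
 * so that they can be reused by subsequent requests.
 */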
static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
{
        unsigned long flags;

        spin_lock_irqsave(&blkif->free_pages_lock, flags);
        if (list_empty(&blkif->free_pages)) {
                BUG_ON(blkif->free_pages_num != 0);
                spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
                return alloc_xenballooned_pages(1, page, false);
        }
        BUG_ON(blkif->free_pages_num == 0);
        page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
        list_del(&page[0]->lru);
        blkif->free_pages_num--;
        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);

        return 0;
}
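
/* Return a batch of pages to the free pool of this backend. */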
static inline void put_free_pages(struct xen_blkif *blkif, struct page **page,
                                  int num)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&blkif->free_pages_lock, flags);
        for (i = 0; i < num; i++)
                list_add(&page[i]->lru, &blkif->free_pages);
        blkif->free_pages_num += num;
        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
}

static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
{
        /* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
        struct page *page[NUM_BATCH_FREE_PAGES];
        unsigned int num_pages = 0;
        unsigned long flags;

        spin_lock_irqsave(&blkif->free_pages_lock, flags);
        while (blkif->free_pages_num > num) {
                BUG_ON(list_empty(&blkif->free_pages));
                page[num_pages] = list_first_entry(&blkif->free_pages,
                                                   struct page, lru);
                list_del(&page[num_pages]->lru);
                blkif->free_pages_num--;
                if (++num_pages == NUM_BATCH_FREE_PAGES) {
                        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
                        free_xenballooned_pages(num_pages, page);
                        spin_lock_irqsave(&blkif->free_pages_lock, flags);
                        num_pages = 0;
                }
        }
        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
        if (num_pages != 0)
                free_xenballooned_pages(num_pages, page);
}

#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))

static int do_block_io_op(struct xen_blkif *blkif);
static int dispatch_rw_block_io(struct xen_blkif *blkif,
                                struct blkif_request *req,
                                struct pending_req *pending_req);
static void make_response(struct xen_blkif *blkif, u64 id,
                          unsigned short op, int st);
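
/*
 * Iterate over a red-black tree of persistent grants. The next node is
 * fetched before the loop body runs, so it is safe to rb_erase() and free
 * the current entry while walking the tree.
 */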
#define foreach_grant_safe(pos, n, rbtree, node) \
        for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
             (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
             &(pos)->node != NULL; \
             (pos) = container_of(n, typeof(*(pos)), node), \
             (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)

/*
 * We don't need locking around the persistent grant helpers
 * because blkback uses a single thread for each backend, so we
 * can be sure that these functions will never be called recursively.
 *
 * The only exception to that is put_persistent_gnt, which can be called
 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
 * bit operations to modify the flags of a persistent grant and to count
 * the number of used grants.
 */
static int add_persistent_gnt(struct xen_blkif *blkif,
                              struct persistent_gnt *persistent_gnt)
{
        struct rb_node **new = NULL, *parent = NULL;
        struct persistent_gnt *this;

        if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) {
                if (!blkif->vbd.overflow_max_grants)
                        blkif->vbd.overflow_max_grants = 1;
                return -EBUSY;
        }
        /* Figure out where to put new node */
        new = &blkif->persistent_gnts.rb_node;
        while (*new) {
                this = container_of(*new, struct persistent_gnt, node);

                parent = *new;
                if (persistent_gnt->gnt < this->gnt)
                        new = &((*new)->rb_left);
                else if (persistent_gnt->gnt > this->gnt)
                        new = &((*new)->rb_right);
                else {
                        pr_alert_ratelimited(DRV_PFX " trying to add a gref that's already in the tree\n");
                        return -EINVAL;
                }
        }

        bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
        set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
        /* Add new node and rebalance tree. */
        rb_link_node(&(persistent_gnt->node), parent, new);
        rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts);
        blkif->persistent_gnt_c++;
        atomic_inc(&blkif->persistent_gnt_in_use);
        return 0;
}
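
/*
 * Look up a grant reference in the red-black tree of persistent grants and
 * mark it active. Returns NULL if the grant is not in the tree or is
 * already in use by another segment.
 */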
static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif,
                                                 grant_ref_t gref)
{
        struct persistent_gnt *data;
        struct rb_node *node = NULL;

        node = blkif->persistent_gnts.rb_node;
        while (node) {
                data = container_of(node, struct persistent_gnt, node);

                if (gref < data->gnt)
                        node = node->rb_left;
                else if (gref > data->gnt)
                        node = node->rb_right;
                else {
                        if (test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
                                pr_alert_ratelimited(DRV_PFX " requesting a grant already in use\n");
                                return NULL;
                        }
                        set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
                        atomic_inc(&blkif->persistent_gnt_in_use);
                        return data;
                }
        }
        return NULL;
}
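
/* Release a persistent grant obtained with get_persistent_gnt(). */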
static void put_persistent_gnt(struct xen_blkif *blkif,
                               struct persistent_gnt *persistent_gnt)
{
        if (!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
                pr_alert_ratelimited(DRV_PFX " freeing a grant already unused\n");
        set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
        clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
        atomic_dec(&blkif->persistent_gnt_in_use);
}
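
/*
 * Unmap and free every persistent grant in the tree, batching the unmap
 * operations in groups of BLKIF_MAX_SEGMENTS_PER_REQUEST. Used when the
 * backend thread shuts down.
 */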
static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
                                 unsigned int num)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt;
        struct rb_node *n;
        int ret = 0;
        int segs_to_unmap = 0;

        foreach_grant_safe(persistent_gnt, n, root, node) {
                BUG_ON(persistent_gnt->handle ==
                       BLKBACK_INVALID_HANDLE);
                gnttab_set_unmap_op(&unmap[segs_to_unmap],
                                    (unsigned long) pfn_to_kaddr(page_to_pfn(
                                            persistent_gnt->page)),
                                    GNTMAP_host_map,
                                    persistent_gnt->handle);

                pages[segs_to_unmap] = persistent_gnt->page;

                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
                    !rb_next(&persistent_gnt->node)) {
                        ret = gnttab_unmap_refs(unmap, NULL, pages,
                                                segs_to_unmap);
                        BUG_ON(ret);
                        put_free_pages(blkif, pages, segs_to_unmap);
                        segs_to_unmap = 0;
                }

                rb_erase(&persistent_gnt->node, root);
                kfree(persistent_gnt);
                num--;
        }
        BUG_ON(num != 0);
}
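
/* Deferred work: unmap the grants that purge_persistent_gnt() removed. */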
static void unmap_purged_grants(struct work_struct *work)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt;
        int ret, segs_to_unmap = 0;
        struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);

        while (!list_empty(&blkif->persistent_purge_list)) {
                persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
                                                  struct persistent_gnt,
                                                  remove_node);
                list_del(&persistent_gnt->remove_node);

                gnttab_set_unmap_op(&unmap[segs_to_unmap],
                                    vaddr(persistent_gnt->page),
                                    GNTMAP_host_map,
                                    persistent_gnt->handle);

                pages[segs_to_unmap] = persistent_gnt->page;

                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
                        ret = gnttab_unmap_refs(unmap, NULL, pages,
                                                segs_to_unmap);
                        BUG_ON(ret);
                        put_free_pages(blkif, pages, segs_to_unmap);
                        segs_to_unmap = 0;
                }
                kfree(persistent_gnt);
        }
        if (segs_to_unmap > 0) {
                ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
                BUG_ON(ret);
                put_free_pages(blkif, pages, segs_to_unmap);
        }
}
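
/*
 * Trim the tree of persistent grants using a two-pass LRU scan: first drop
 * grants that have not been used since the last purge, then, if more room
 * is needed, drop grants that were used but are not active right now. The
 * actual unmapping is deferred to unmap_purged_grants().
 */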
static void purge_persistent_gnt(struct xen_blkif *blkif)
{
        struct persistent_gnt *persistent_gnt;
        struct rb_node *n;
        unsigned int num_clean, total;
        bool scan_used = false;
        struct rb_root *root;

        if (blkif->persistent_gnt_c < xen_blkif_max_pgrants ||
            (blkif->persistent_gnt_c == xen_blkif_max_pgrants &&
            !blkif->vbd.overflow_max_grants)) {
                return;
        }

        if (work_pending(&blkif->persistent_purge_work)) {
                pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n");
                return;
        }

        num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
        num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
        num_clean = min(blkif->persistent_gnt_c, num_clean);
        if (num_clean >
            (blkif->persistent_gnt_c -
            atomic_read(&blkif->persistent_gnt_in_use)))
                return;

        /*
         * At this point, we can be sure that there will be no calls
         * to get_persistent_gnt (because we are executing this code from
         * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
         * which means that the number of currently used grants will go down,
         * but never up, so we will always be able to remove the requested
         * number of grants.
         */

        total = num_clean;

        pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);

        INIT_LIST_HEAD(&blkif->persistent_purge_list);
        root = &blkif->persistent_gnts;
purge_list:
        foreach_grant_safe(persistent_gnt, n, root, node) {
                BUG_ON(persistent_gnt->handle ==
                       BLKBACK_INVALID_HANDLE);

                if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
                        continue;
                if (!scan_used &&
                    (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
                        continue;

                rb_erase(&persistent_gnt->node, root);
                list_add(&persistent_gnt->remove_node,
                         &blkif->persistent_purge_list);
                if (--num_clean == 0)
                        goto finished;
        }
        /*
         * If we get here it means we also need to start cleaning
         * grants that were used since the last purge in order to cope
         * with the requested number.
         */
        if (!scan_used) {
                pr_debug(DRV_PFX "Still missing %u purged frames\n", num_clean);
                scan_used = true;
                goto purge_list;
        }
finished:
        /* Remove the "used" flag from all the persistent grants */
        foreach_grant_safe(persistent_gnt, n, root, node) {
                BUG_ON(persistent_gnt->handle ==
                       BLKBACK_INVALID_HANDLE);
                clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
        }
        blkif->persistent_gnt_c -= (total - num_clean);
        blkif->vbd.overflow_max_grants = 0;

        /* We can defer this work */
        INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants);
        schedule_work(&blkif->persistent_purge_work);
        pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
        return;
}

/*
 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
 */
static struct pending_req *alloc_req(struct xen_blkif *blkif)
{
        struct pending_req *req = NULL;
        unsigned long flags;

        spin_lock_irqsave(&blkif->pending_free_lock, flags);
        if (!list_empty(&blkif->pending_free)) {
                req = list_entry(blkif->pending_free.next, struct pending_req,
                                 free_list);
                list_del(&req->free_list);
        }
        spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
        return req;
}

/*
 * Return the 'pending_req' structure back to the free pool. We also
 * wake up the thread if it was waiting for a free request.
 */
static void free_req(struct xen_blkif *blkif, struct pending_req *req)
{
        unsigned long flags;
        int was_empty;

        spin_lock_irqsave(&blkif->pending_free_lock, flags);
        was_empty = list_empty(&blkif->pending_free);
        list_add(&req->free_list, &blkif->pending_free);
        spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
        if (was_empty)
                wake_up(&blkif->pending_free_wq);
}

/*
 * Routines for managing virtual block devices (vbds).
 */
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
                             int operation)
{
        struct xen_vbd *vbd = &blkif->vbd;
        int rc = -EACCES;

        if ((operation != READ) && vbd->readonly)
                goto out;

        if (likely(req->nr_sects)) {
                blkif_sector_t end = req->sector_number + req->nr_sects;

                if (unlikely(end < req->sector_number))
                        goto out;
                if (unlikely(end > vbd_sz(vbd)))
                        goto out;
        }

        req->dev  = vbd->pdevice;
        req->bdev = vbd->bdev;
        rc = 0;

out:
        return rc;
}

static void xen_vbd_resize(struct xen_blkif *blkif)
{
        struct xen_vbd *vbd = &blkif->vbd;
        struct xenbus_transaction xbt;
        int err;
        struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
        unsigned long long new_size = vbd_sz(vbd);

        pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n",
                blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
        pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size);
        vbd->size = new_size;
again:
        err = xenbus_transaction_start(&xbt);
        if (err) {
                pr_warn(DRV_PFX "Error starting transaction\n");
                return;
        }
        err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
                            (unsigned long long)vbd_sz(vbd));
        if (err) {
                pr_warn(DRV_PFX "Error writing new size\n");
                goto abort;
        }
        /*
         * Write the current state; we will use this to synchronize
         * the front-end. If the current state is "connected" the
         * front-end will get the new size information online.
         */
        err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
        if (err) {
                pr_warn(DRV_PFX "Error writing the state\n");
                goto abort;
        }

        err = xenbus_transaction_end(xbt, 0);
        if (err == -EAGAIN)
                goto again;
        if (err)
                pr_warn(DRV_PFX "Error ending transaction\n");
        return;
abort:
        xenbus_transaction_end(xbt, 1);
}

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct xen_blkif *blkif)
{
        blkif->waiting_reqs = 1;
        wake_up(&blkif->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
        blkif_notify_work(dev_id);
        return IRQ_HANDLED;
}

/*
 * SCHEDULER FUNCTIONS
 */
static void print_stats(struct xen_blkif *blkif)
{
        pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
                " | ds %4llu | pg: %4u/%4d\n",
                current->comm, blkif->st_oo_req,
                blkif->st_rd_req, blkif->st_wr_req,
                blkif->st_f_req, blkif->st_ds_req,
                blkif->persistent_gnt_c,
                xen_blkif_max_pgrants);
        blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
        blkif->st_rd_req = 0;
        blkif->st_wr_req = 0;
        blkif->st_oo_req = 0;
        blkif->st_ds_req = 0;
}
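
/*
 * Main loop of the per-backend kernel thread: wait for requests on the
 * ring, dispatch them, and periodically purge the persistent-grant list
 * and shrink the free-page pool.
 */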
int xen_blkif_schedule(void *arg)
{
        struct xen_blkif *blkif = arg;
        struct xen_vbd *vbd = &blkif->vbd;
        unsigned long timeout;

        xen_blkif_get(blkif);

        while (!kthread_should_stop()) {
                if (try_to_freeze())
                        continue;
                if (unlikely(vbd->size != vbd_sz(vbd)))
                        xen_vbd_resize(blkif);

                timeout = msecs_to_jiffies(LRU_INTERVAL);

                timeout = wait_event_interruptible_timeout(
                        blkif->wq,
                        blkif->waiting_reqs || kthread_should_stop(),
                        timeout);
                if (timeout == 0)
                        goto purge_gnt_list;
                timeout = wait_event_interruptible_timeout(
                        blkif->pending_free_wq,
                        !list_empty(&blkif->pending_free) ||
                        kthread_should_stop(),
                        timeout);
                if (timeout == 0)
                        goto purge_gnt_list;

                blkif->waiting_reqs = 0;
                smp_mb(); /* clear flag *before* checking for work */

                if (do_block_io_op(blkif))
                        blkif->waiting_reqs = 1;

purge_gnt_list:
                if (blkif->vbd.feature_gnt_persistent &&
                    time_after(jiffies, blkif->next_lru)) {
                        purge_persistent_gnt(blkif);
                        blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
                }

                /* Shrink if we have more than xen_blkif_max_buffer_pages */
                shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages);

                if (log_stats && time_after(jiffies, blkif->st_print))
                        print_stats(blkif);
        }

        /* Since we are shutting down remove all pages from the buffer */
        shrink_free_pagepool(blkif, 0 /* All */);

        /* Free all persistent grant pages */
        if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
                free_persistent_gnts(blkif, &blkif->persistent_gnts,
                                     blkif->persistent_gnt_c);

        BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
        blkif->persistent_gnt_c = 0;

        if (log_stats)
                print_stats(blkif);

        blkif->xenblkd = NULL;
        xen_blkif_put(blkif);

        return 0;
}

struct seg_buf {
        unsigned int offset;
        unsigned int nsec;
};

/*
 * Unmap the grant references, and also remove the M2P over-rides
 * used in the 'pending_req'.
 */
static void xen_blkbk_unmap(struct xen_blkif *blkif,
                            grant_handle_t handles[],
                            struct page *pages[],
                            struct persistent_gnt *persistent_gnts[],
                            int num)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int i, invcount = 0;
        int ret;

        for (i = 0; i < num; i++) {
                if (persistent_gnts[i] != NULL) {
                        put_persistent_gnt(blkif, persistent_gnts[i]);
                        continue;
                }
                if (handles[i] == BLKBACK_INVALID_HANDLE)
                        continue;
                unmap_pages[invcount] = pages[i];
                gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]),
                                    GNTMAP_host_map, handles[i]);
                handles[i] = BLKBACK_INVALID_HANDLE;
                if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
                        ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
                                                invcount);
                        BUG_ON(ret);
                        put_free_pages(blkif, unmap_pages, invcount);
                        invcount = 0;
                }
        }
        if (invcount) {
                ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
                BUG_ON(ret);
                put_free_pages(blkif, unmap_pages, invcount);
        }
}
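
/*
 * Map the grant references of a request, reusing persistently mapped
 * grants where possible and batching the map operations in groups of
 * BLKIF_MAX_SEGMENTS_PER_REQUEST. Newly mapped grants are added to the
 * persistent-grant tree while there is room for them.
 */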
static int xen_blkbk_map(struct xen_blkif *blkif, grant_ref_t grefs[],
                         struct persistent_gnt *persistent_gnts[],
                         grant_handle_t handles[],
                         struct page *pages[],
                         int num, bool ro)
{
        struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt = NULL;
        phys_addr_t addr = 0;
        int i, seg_idx, new_map_idx;
        int segs_to_map = 0;
        int ret = 0;
        int last_map = 0, map_until = 0;
        int use_persistent_gnts;

        use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);

        /*
         * Fill out preq.nr_sects with the proper amount of sectors, and set
         * up map[..] with the PFN of the page in our domain and the
         * corresponding grant reference for each page.
         */
again:
        for (i = map_until; i < num; i++) {
                uint32_t flags;

                if (use_persistent_gnts)
                        persistent_gnt = get_persistent_gnt(
                                blkif,
                                grefs[i]);

                if (persistent_gnt) {
                        /*
                         * We are using persistent grants and
                         * the grant is already mapped
                         */
                        pages[i] = persistent_gnt->page;
                        persistent_gnts[i] = persistent_gnt;
                } else {
                        if (get_free_page(blkif, &pages[i]))
                                goto out_of_memory;
                        addr = vaddr(pages[i]);
                        pages_to_gnt[segs_to_map] = pages[i];
                        persistent_gnts[i] = NULL;
                        flags = GNTMAP_host_map;
                        if (!use_persistent_gnts && ro)
                                flags |= GNTMAP_readonly;
                        gnttab_set_map_op(&map[segs_to_map++], addr,
                                          flags, grefs[i],
                                          blkif->domid);
                }
                map_until = i + 1;
                if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
                        break;
        }

        if (segs_to_map) {
                ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
                BUG_ON(ret);
        }

        /*
         * Now swizzle the MFN in our domain with the MFN from the other domain
         * so that when we access vaddr(pending_req,i) it has the contents of
         * the page from the other domain.
         */
        for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
                if (!persistent_gnts[seg_idx]) {
                        /* This is a newly mapped grant */
                        BUG_ON(new_map_idx >= segs_to_map);
                        if (unlikely(map[new_map_idx].status != 0)) {
                                pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
                                handles[seg_idx] = BLKBACK_INVALID_HANDLE;
                                ret |= 1;
                                goto next;
                        }
                        handles[seg_idx] = map[new_map_idx].handle;
                } else {
                        continue;
                }
                if (use_persistent_gnts &&
                    blkif->persistent_gnt_c < xen_blkif_max_pgrants) {
                        /*
                         * We are using persistent grants, the grant is
                         * not mapped but we might have room for it.
                         */
                        persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
                                                 GFP_KERNEL);
                        if (!persistent_gnt) {
                                /*
                                 * If we don't have enough memory to
                                 * allocate the persistent_gnt struct
                                 * map this grant non-persistently
                                 */
                                goto next;
                        }
                        persistent_gnt->gnt = map[new_map_idx].ref;
                        persistent_gnt->handle = map[new_map_idx].handle;
                        persistent_gnt->page = pages[seg_idx];
                        if (add_persistent_gnt(blkif,
                                               persistent_gnt)) {
                                kfree(persistent_gnt);
                                persistent_gnt = NULL;
                                goto next;
                        }
                        persistent_gnts[seg_idx] = persistent_gnt;
                        pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
                                 persistent_gnt->gnt, blkif->persistent_gnt_c,
                                 xen_blkif_max_pgrants);
                        goto next;
                }
                if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
                        blkif->vbd.overflow_max_grants = 1;
                        pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
                                 blkif->domid, blkif->vbd.handle);
                }
                /*
                 * We could not map this grant persistently, so use it as
                 * a non-persistent grant.
                 */
next:
                new_map_idx++;
        }
        segs_to_map = 0;
        last_map = map_until;
        if (map_until != num)
                goto again;

        return ret;

out_of_memory:
        pr_alert(DRV_PFX "%s: out of memory\n", __func__);
        put_free_pages(blkif, pages_to_gnt, segs_to_map);
        return -ENOMEM;
}
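
/* Map the segments of a ring request and compute each segment's offset. */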
static int xen_blkbk_map_seg(struct blkif_request *req,
                             struct pending_req *pending_req,
                             struct seg_buf seg[],
                             struct page *pages[])
{
        int i, rc;
        grant_ref_t grefs[BLKIF_MAX_SEGMENTS_PER_REQUEST];

        for (i = 0; i < req->u.rw.nr_segments; i++)
                grefs[i] = req->u.rw.seg[i].gref;

        rc = xen_blkbk_map(pending_req->blkif, grefs,
                           pending_req->persistent_gnts,
                           pending_req->grant_handles, pending_req->pages,
                           req->u.rw.nr_segments,
                           (pending_req->operation != BLKIF_OP_READ));
        if (rc)
                return rc;

        for (i = 0; i < req->u.rw.nr_segments; i++)
                seg[i].offset = (req->u.rw.seg[i].first_sect << 9);

        return 0;
}
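
/*
 * Handle a BLKIF_OP_DISCARD request by forwarding it to the underlying
 * block device via blkdev_issue_discard(), honouring the secure-discard
 * flag when the device supports it.
 */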
static int dispatch_discard_io(struct xen_blkif *blkif,
                               struct blkif_request *req)
{
        int err = 0;
        int status = BLKIF_RSP_OKAY;
        struct block_device *bdev = blkif->vbd.bdev;
        unsigned long secure;

        blkif->st_ds_req++;

        xen_blkif_get(blkif);
        secure = (blkif->vbd.discard_secure &&
                  (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
                 BLKDEV_DISCARD_SECURE : 0;

        err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
                                   req->u.discard.nr_sectors,
                                   GFP_KERNEL, secure);
        if (err == -EOPNOTSUPP) {
                pr_debug(DRV_PFX "discard op failed, not supported\n");
                status = BLKIF_RSP_EOPNOTSUPP;
        } else if (err)
                status = BLKIF_RSP_ERROR;

        make_response(blkif, req->u.discard.id, req->operation, status);
        xen_blkif_put(blkif);
        return err;
}

static int dispatch_other_io(struct xen_blkif *blkif,
                             struct blkif_request *req,
                             struct pending_req *pending_req)
{
        free_req(blkif, pending_req);
        make_response(blkif, req->u.other.id, req->operation,
                      BLKIF_RSP_EOPNOTSUPP);
        return -EIO;
}
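
/*
 * Wait for all in-flight requests on this backend to complete. Used to
 * order a WRITE_BARRIER behind the outstanding I/O.
 */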
static void xen_blk_drain_io(struct xen_blkif *blkif)
{
        atomic_set(&blkif->drain, 1);
        do {
                /* The initial value is one, and one refcnt taken at the
                 * start of the xen_blkif_schedule thread. */
                if (atomic_read(&blkif->refcnt) <= 2)
                        break;
                wait_for_completion_interruptible_timeout(
                                &blkif->drain_complete, HZ);

                if (!atomic_read(&blkif->drain))
                        break;
        } while (!kthread_should_stop());
        atomic_set(&blkif->drain, 0);
}

/*
 * Completion callback on the bio's. Called as bio->bi_end_io().
 */
static void __end_block_io_op(struct pending_req *pending_req, int error)
{
        /* An error fails the entire request. */
        if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
            (error == -EOPNOTSUPP)) {
                pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
                xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
        } else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
                   (error == -EOPNOTSUPP)) {
                pr_debug(DRV_PFX "write barrier op failed, not supported\n");
                xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
        } else if (error) {
                pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
                         " error=%d\n", error);
                pending_req->status = BLKIF_RSP_ERROR;
        }

        /*
         * If all of the bio's have completed it is time to unmap
         * the grant references associated with 'request' and provide
         * the proper response on the ring.
         */
        if (atomic_dec_and_test(&pending_req->pendcnt)) {
                xen_blkbk_unmap(pending_req->blkif, pending_req->grant_handles,
                                pending_req->pages,
                                pending_req->persistent_gnts,
                                pending_req->nr_pages);
                make_response(pending_req->blkif, pending_req->id,
                              pending_req->operation, pending_req->status);
                xen_blkif_put(pending_req->blkif);
                if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
                        if (atomic_read(&pending_req->blkif->drain))
                                complete(&pending_req->blkif->drain_complete);
                }
                free_req(pending_req->blkif, pending_req);
        }
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio, int error)
{
        __end_block_io_op(bio->bi_private, error);
        bio_put(bio);
}

/*
 * Function to copy the 'struct blkif_request' from the ring buffer
 * (which has the sectors we want, number of them, grant references, etc),
 * and transmute it to the block API to hand it over to the proper block disk.
 */
static int
__do_block_io_op(struct xen_blkif *blkif)
{
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        struct blkif_request req;
        struct pending_req *pending_req;
        RING_IDX rc, rp;
        int more_to_do = 0;

        rc = blk_rings->common.req_cons;
        rp = blk_rings->common.sring->req_prod;
        rmb(); /* Ensure we see queued requests up to 'rp'. */

        while (rc != rp) {

                if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
                        break;

                if (kthread_should_stop()) {
                        more_to_do = 1;
                        break;
                }

                pending_req = alloc_req(blkif);
                if (NULL == pending_req) {
                        blkif->st_oo_req++;
                        more_to_do = 1;
                        break;
                }

                switch (blkif->blk_protocol) {
                case BLKIF_PROTOCOL_NATIVE:
                        memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
                        break;
                case BLKIF_PROTOCOL_X86_32:
                        blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
                        break;
                case BLKIF_PROTOCOL_X86_64:
                        blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
                        break;
                default:
                        BUG();
                }
                blk_rings->common.req_cons = ++rc; /* before make_response() */

                /* Apply all sanity checks to /private copy/ of request. */
                barrier();

                switch (req.operation) {
                case BLKIF_OP_READ:
                case BLKIF_OP_WRITE:
                case BLKIF_OP_WRITE_BARRIER:
                case BLKIF_OP_FLUSH_DISKCACHE:
                        if (dispatch_rw_block_io(blkif, &req, pending_req))
                                goto done;
                        break;
                case BLKIF_OP_DISCARD:
                        free_req(blkif, pending_req);
                        if (dispatch_discard_io(blkif, &req))
                                goto done;
                        break;
                default:
                        if (dispatch_other_io(blkif, &req, pending_req))
                                goto done;
                        break;
                }

                /* Yield point for this unbounded loop. */
                cond_resched();
        }
done:
        return more_to_do;
}
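
/*
 * Drain the ring, re-checking for requests that raced with the final
 * consumer-index update until no more work is published.
 */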
static int
do_block_io_op(struct xen_blkif *blkif)
{
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        int more_to_do;

        do {
                more_to_do = __do_block_io_op(blkif);
                if (more_to_do)
                        break;

                RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
        } while (more_to_do);

        return more_to_do;
}

/*
 * Transmutation of the 'struct blkif_request' to a proper 'struct bio'
 * and a call to 'submit_bio' to pass it to the underlying storage.
 */
static int dispatch_rw_block_io(struct xen_blkif *blkif,
                                struct blkif_request *req,
                                struct pending_req *pending_req)
{
        struct phys_req preq;
        struct seg_buf seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int nseg;
        struct bio *bio = NULL;
        struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        int i, nbio = 0;
        int operation;
        struct blk_plug plug;
        bool drain = false;
        struct page **pages = pending_req->pages;

        switch (req->operation) {
        case BLKIF_OP_READ:
                blkif->st_rd_req++;
                operation = READ;
                break;
        case BLKIF_OP_WRITE:
                blkif->st_wr_req++;
                operation = WRITE_ODIRECT;
                break;
        case BLKIF_OP_WRITE_BARRIER:
                drain = true;
                /* fall through */
        case BLKIF_OP_FLUSH_DISKCACHE:
                blkif->st_f_req++;
                operation = WRITE_FLUSH;
                break;
        default:
                operation = 0; /* make gcc happy */
                goto fail_response;
                break;
        }

        /* Check that the number of segments is sane. */
        nseg = req->u.rw.nr_segments;

        if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
            unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
                pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
                         nseg);
                /* Haven't submitted any bio's yet. */
                goto fail_response;
        }

        preq.sector_number = req->u.rw.sector_number;
        preq.nr_sects      = 0;

        pending_req->blkif     = blkif;
        pending_req->id        = req->u.rw.id;
        pending_req->operation = req->operation;
        pending_req->status    = BLKIF_RSP_OKAY;
        pending_req->nr_pages  = nseg;

        for (i = 0; i < nseg; i++) {
                seg[i].nsec = req->u.rw.seg[i].last_sect -
                        req->u.rw.seg[i].first_sect + 1;
                if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
                    (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
                        goto fail_response;
                preq.nr_sects += seg[i].nsec;
        }

        if (xen_vbd_translate(&preq, blkif, operation) != 0) {
                pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
                         operation == READ ? "read" : "write",
                         preq.sector_number,
                         preq.sector_number + preq.nr_sects,
                         blkif->vbd.pdevice);
                goto fail_response;
        }

        /*
         * This check _MUST_ be done after xen_vbd_translate as the
         * preq.bdev is set there.
         */
        for (i = 0; i < nseg; i++) {
                if (((int)preq.sector_number|(int)seg[i].nsec) &
                    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
                        pr_debug(DRV_PFX "Misaligned I/O request from domain %d\n",
                                 blkif->domid);
                        goto fail_response;
                }
        }

        /* Wait on all outstanding I/O's and once that has been completed
         * issue the WRITE_FLUSH.
         */
        if (drain)
                xen_blk_drain_io(pending_req->blkif);

        /*
         * If we have failed at this point, we need to undo the M2P override,
         * set gnttab_set_unmap_op on all of the grant references and perform
         * the hypercall to unmap the grants - that is all done in
         * xen_blkbk_unmap.
         */
        if (xen_blkbk_map_seg(req, pending_req, seg, pages))
                goto fail_flush;

        /*
         * This corresponding xen_blkif_put is done in __end_block_io_op, or
         * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
         */
        xen_blkif_get(blkif);

        for (i = 0; i < nseg; i++) {
                while ((bio == NULL) ||
                       (bio_add_page(bio,
                                     pages[i],
                                     seg[i].nsec << 9,
                                     seg[i].offset) == 0)) {

                        bio = bio_alloc(GFP_KERNEL, nseg-i);
                        if (unlikely(bio == NULL))
                                goto fail_put_bio;

                        biolist[nbio++] = bio;
                        bio->bi_bdev    = preq.bdev;
                        bio->bi_private = pending_req;
                        bio->bi_end_io  = end_block_io_op;
                        bio->bi_sector  = preq.sector_number;
                }

                preq.sector_number += seg[i].nsec;
        }

        /* This will be hit if the operation was a flush or discard. */
        if (!bio) {
                BUG_ON(operation != WRITE_FLUSH);

                bio = bio_alloc(GFP_KERNEL, 0);
                if (unlikely(bio == NULL))
                        goto fail_put_bio;

                biolist[nbio++] = bio;
                bio->bi_bdev    = preq.bdev;
                bio->bi_private = pending_req;
                bio->bi_end_io  = end_block_io_op;
        }

        atomic_set(&pending_req->pendcnt, nbio);
        blk_start_plug(&plug);

        for (i = 0; i < nbio; i++)
                submit_bio(operation, biolist[i]);

        /* Let the I/Os go.. */
        blk_finish_plug(&plug);

        if (operation == READ)
                blkif->st_rd_sect += preq.nr_sects;
        else if (operation & WRITE)
                blkif->st_wr_sect += preq.nr_sects;

        return 0;

fail_flush:
        xen_blkbk_unmap(blkif, pending_req->grant_handles,
                        pending_req->pages, pending_req->persistent_gnts,
                        pending_req->nr_pages);
fail_response:
        /* Haven't submitted any bio's yet. */
        make_response(blkif, req->u.rw.id, req->operation, BLKIF_RSP_ERROR);
        free_req(blkif, pending_req);
        msleep(1); /* back off a bit */
        return -EIO;

fail_put_bio:
        for (i = 0; i < nbio; i++)
                bio_put(biolist[i]);
        atomic_set(&pending_req->pendcnt, 1);
        __end_block_io_op(pending_req, -EINVAL);
        msleep(1); /* back off a bit */
        return -EIO;
}

/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct xen_blkif *blkif, u64 id,
                          unsigned short op, int st)
{
        struct blkif_response resp;
        unsigned long flags;
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        int notify;

        resp.id        = id;
        resp.operation = op;
        resp.status    = st;

        spin_lock_irqsave(&blkif->blk_ring_lock, flags);
        /* Place on the response ring for the relevant domain. */
        switch (blkif->blk_protocol) {
        case BLKIF_PROTOCOL_NATIVE:
                memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        case BLKIF_PROTOCOL_X86_32:
                memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        case BLKIF_PROTOCOL_X86_64:
                memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        default:
                BUG();
        }
        blk_rings->common.rsp_prod_pvt++;
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
        spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
        if (notify)
                notify_remote_via_irq(blkif->irq);
}

static int __init xen_blkif_init(void)
{
        int rc = 0;

        if (!xen_domain())
                return -ENODEV;

        rc = xen_blkif_interface_init();
        if (rc)
                goto failed_init;

        rc = xen_blkif_xenbus_init();
        if (rc)
                goto failed_init;

failed_init:
        return rc;
}

module_init(xen_blkif_init);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");