blkback.c

/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/bitmap.h>

#include <xen/events.h>
#include <xen/page.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#include "common.h"

/*
 * Maximum number of unused free pages to keep in the internal buffer.
 * Setting this to a value too low will reduce memory used in each backend,
 * but can have a performance penalty.
 *
 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but it
 * can be set to a lower value that might degrade performance on some
 * intensive IO workloads.
 */
static int xen_blkif_max_buffer_pages = 1024;
module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
"Maximum number of free pages to keep in each block backend buffer");

/*
 * Maximum number of grants to map persistently in blkback. For maximum
 * performance this should be the total number of grants that can be used
 * to fill the ring, but since this might become too high, especially with
 * the use of indirect descriptors, we set it to a value that provides good
 * performance without using too much memory.
 *
 * When the list of persistent grants is full we clean it up using an LRU
 * algorithm.
 */
static int xen_blkif_max_pgrants = 1056;
module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
MODULE_PARM_DESC(max_persistent_grants,
"Maximum number of grants to map persistently");

/*
 * The LRU mechanism to clean the lists of persistent grants needs to
 * be executed periodically. The time interval between consecutive executions
 * of the purge mechanism is set in ms.
 */
#define LRU_INTERVAL 100

/*
 * When the persistent grants list is full we will remove unused grants
 * from the list. The percent number of grants to be removed at each LRU
 * execution.
 */
#define LRU_PERCENT_CLEAN 5

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, int, 0644);

#define BLKBACK_INVALID_HANDLE (~0)

/* Number of free pages to remove on each call to free_xenballooned_pages */
#define NUM_BATCH_FREE_PAGES 10
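
/*
 * Per-backend page pool helpers: get_free_page() hands out a page from the
 * backend's free list, falling back to alloc_xenballooned_pages() when the
 * list is empty; put_free_pages() returns pages to the list for reuse, and
 * shrink_free_pagepool() gives surplus pages back to the balloon driver.
 */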
static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
{
	unsigned long flags;

	spin_lock_irqsave(&blkif->free_pages_lock, flags);
	if (list_empty(&blkif->free_pages)) {
		BUG_ON(blkif->free_pages_num != 0);
		spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
		return alloc_xenballooned_pages(1, page, false);
	}
	BUG_ON(blkif->free_pages_num == 0);
	page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
	list_del(&page[0]->lru);
	blkif->free_pages_num--;
	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);

	return 0;
}

static inline void put_free_pages(struct xen_blkif *blkif, struct page **page,
				  int num)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&blkif->free_pages_lock, flags);
	for (i = 0; i < num; i++)
		list_add(&page[i]->lru, &blkif->free_pages);
	blkif->free_pages_num += num;
	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
}

static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
{
	/* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
	struct page *page[NUM_BATCH_FREE_PAGES];
	unsigned int num_pages = 0;
	unsigned long flags;

	spin_lock_irqsave(&blkif->free_pages_lock, flags);
	while (blkif->free_pages_num > num) {
		BUG_ON(list_empty(&blkif->free_pages));
		page[num_pages] = list_first_entry(&blkif->free_pages,
						   struct page, lru);
		list_del(&page[num_pages]->lru);
		blkif->free_pages_num--;
		if (++num_pages == NUM_BATCH_FREE_PAGES) {
			spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
			free_xenballooned_pages(num_pages, page);
			spin_lock_irqsave(&blkif->free_pages_lock, flags);
			num_pages = 0;
		}
	}
	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
	if (num_pages != 0)
		free_xenballooned_pages(num_pages, page);
}

#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))

static int do_block_io_op(struct xen_blkif *blkif);
static int dispatch_rw_block_io(struct xen_blkif *blkif,
				struct blkif_request *req,
				struct pending_req *pending_req);
static void make_response(struct xen_blkif *blkif, u64 id,
			  unsigned short op, int st);
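
/*
 * Walk the rb-tree of persistent grants in a way that is safe against
 * erasing the current node: the next node is looked up before the loop
 * body runs, so rb_erase() on 'pos' does not break the iteration.
 */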
#define foreach_grant_safe(pos, n, rbtree, node) \
	for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
	     &(pos)->node != NULL; \
	     (pos) = container_of(n, typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)

/*
 * We don't need locking around the persistent grant helpers
 * because blkback uses a single thread for each backend, so we
 * can be sure that these functions will never be called recursively.
 *
 * The only exception to that is put_persistent_grant, that can be called
 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
 * bit operations to modify the flags of a persistent grant and to count
 * the number of used grants.
 */
static int add_persistent_gnt(struct xen_blkif *blkif,
			      struct persistent_gnt *persistent_gnt)
{
	struct rb_node **new = NULL, *parent = NULL;
	struct persistent_gnt *this;

	if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) {
		if (!blkif->vbd.overflow_max_grants)
			blkif->vbd.overflow_max_grants = 1;
		return -EBUSY;
	}
	/* Figure out where to put new node */
	new = &blkif->persistent_gnts.rb_node;
	while (*new) {
		this = container_of(*new, struct persistent_gnt, node);

		parent = *new;
		if (persistent_gnt->gnt < this->gnt)
			new = &((*new)->rb_left);
		else if (persistent_gnt->gnt > this->gnt)
			new = &((*new)->rb_right);
		else {
			pr_alert_ratelimited(DRV_PFX " trying to add a gref that's already in the tree\n");
			return -EINVAL;
		}
	}

	bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
	set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
	/* Add new node and rebalance tree. */
	rb_link_node(&(persistent_gnt->node), parent, new);
	rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts);
	blkif->persistent_gnt_c++;
	atomic_inc(&blkif->persistent_gnt_in_use);
	return 0;
}
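
/*
 * Look up a grant reference in the tree of persistent grants. On success the
 * grant is marked active and counted as in use; NULL is returned when the
 * gref is not in the tree or is already active.
 */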
static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif,
						 grant_ref_t gref)
{
	struct persistent_gnt *data;
	struct rb_node *node = NULL;

	node = blkif->persistent_gnts.rb_node;
	while (node) {
		data = container_of(node, struct persistent_gnt, node);

		if (gref < data->gnt)
			node = node->rb_left;
		else if (gref > data->gnt)
			node = node->rb_right;
		else {
			if (test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
				pr_alert_ratelimited(DRV_PFX " requesting a grant already in use\n");
				return NULL;
			}
			set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
			atomic_inc(&blkif->persistent_gnt_in_use);
			return data;
		}
	}
	return NULL;
}

static void put_persistent_gnt(struct xen_blkif *blkif,
			       struct persistent_gnt *persistent_gnt)
{
	if (!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
		pr_alert_ratelimited(DRV_PFX " freeing a grant already unused\n");
	set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
	clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
	atomic_dec(&blkif->persistent_gnt_in_use);
}

static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
				 unsigned int num)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
	struct rb_node *n;
	int ret = 0;
	int segs_to_unmap = 0;

	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);
		gnttab_set_unmap_op(&unmap[segs_to_unmap],
			(unsigned long) pfn_to_kaddr(page_to_pfn(
				persistent_gnt->page)),
			GNTMAP_host_map,
			persistent_gnt->handle);

		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
		    !rb_next(&persistent_gnt->node)) {
			ret = gnttab_unmap_refs(unmap, NULL, pages,
						segs_to_unmap);
			BUG_ON(ret);
			put_free_pages(blkif, pages, segs_to_unmap);
			segs_to_unmap = 0;
		}

		rb_erase(&persistent_gnt->node, root);
		kfree(persistent_gnt);
		num--;
	}
	BUG_ON(num != 0);
}
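
/*
 * Deferred work item: unmap and free the grants that purge_persistent_gnt()
 * moved onto persistent_purge_list, batching the unmap operations in groups
 * of BLKIF_MAX_SEGMENTS_PER_REQUEST.
 */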
static void unmap_purged_grants(struct work_struct *work)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
	int ret, segs_to_unmap = 0;
	struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);

	while (!list_empty(&blkif->persistent_purge_list)) {
		persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
						  struct persistent_gnt,
						  remove_node);
		list_del(&persistent_gnt->remove_node);

		gnttab_set_unmap_op(&unmap[segs_to_unmap],
			vaddr(persistent_gnt->page),
			GNTMAP_host_map,
			persistent_gnt->handle);

		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
			ret = gnttab_unmap_refs(unmap, NULL, pages,
						segs_to_unmap);
			BUG_ON(ret);
			put_free_pages(blkif, pages, segs_to_unmap);
			segs_to_unmap = 0;
		}
		kfree(persistent_gnt);
	}
	if (segs_to_unmap > 0) {
		ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
		BUG_ON(ret);
		put_free_pages(blkif, pages, segs_to_unmap);
	}
}
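
/*
 * LRU purge of the persistent-grant tree. A first pass removes grants that
 * have not been used since the previous purge (WAS_ACTIVE clear); if that is
 * not enough to reach the target, a second pass also removes grants that
 * were used but are not active right now. The removed grants are unmapped
 * later from a workqueue.
 */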
static void purge_persistent_gnt(struct xen_blkif *blkif)
{
	struct persistent_gnt *persistent_gnt;
	struct rb_node *n;
	unsigned int num_clean, total;
	bool scan_used = false;
	struct rb_root *root;

	if (blkif->persistent_gnt_c < xen_blkif_max_pgrants ||
	    (blkif->persistent_gnt_c == xen_blkif_max_pgrants &&
	    !blkif->vbd.overflow_max_grants)) {
		return;
	}

	if (work_pending(&blkif->persistent_purge_work)) {
		pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n");
		return;
	}

	num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
	num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
	num_clean = min(blkif->persistent_gnt_c, num_clean);
	if (num_clean >
	    (blkif->persistent_gnt_c -
	    atomic_read(&blkif->persistent_gnt_in_use)))
		return;

	/*
	 * At this point, we can assure that there will be no calls
	 * to get_persistent_grant (because we are executing this code from
	 * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
	 * which means that the number of currently used grants will go down,
	 * but never up, so we will always be able to remove the requested
	 * number of grants.
	 */

	total = num_clean;

	pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);

	INIT_LIST_HEAD(&blkif->persistent_purge_list);
	root = &blkif->persistent_gnts;
purge_list:
	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);

		if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
			continue;
		if (!scan_used &&
		    (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
			continue;

		rb_erase(&persistent_gnt->node, root);
		list_add(&persistent_gnt->remove_node,
			 &blkif->persistent_purge_list);
		if (--num_clean == 0)
			goto finished;
	}
	/*
	 * If we get here it means we also need to start cleaning
	 * grants that were used since last purge in order to cope
	 * with the requested num
	 */
	if (!scan_used) {
		pr_debug(DRV_PFX "Still missing %u purged frames\n", num_clean);
		scan_used = true;
		goto purge_list;
	}
finished:
	/* Remove the "used" flag from all the persistent grants */
	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);
		clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
	}
	blkif->persistent_gnt_c -= (total - num_clean);
	blkif->vbd.overflow_max_grants = 0;

	/* We can defer this work */
	INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants);
	schedule_work(&blkif->persistent_purge_work);
	pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
	return;
}

/*
 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
 */
static struct pending_req *alloc_req(struct xen_blkif *blkif)
{
	struct pending_req *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&blkif->pending_free_lock, flags);
	if (!list_empty(&blkif->pending_free)) {
		req = list_entry(blkif->pending_free.next, struct pending_req,
				 free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
	return req;
}

/*
 * Return the 'pending_req' structure back to the free pool. We also
 * wake up the thread if it was waiting for a free page.
 */
static void free_req(struct xen_blkif *blkif, struct pending_req *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&blkif->pending_free_lock, flags);
	was_empty = list_empty(&blkif->pending_free);
	list_add(&req->free_list, &blkif->pending_free);
	spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
	if (was_empty)
		wake_up(&blkif->pending_free_wq);
}

/*
 * Routines for managing virtual block devices (vbds).
 */
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
			     int operation)
{
	struct xen_vbd *vbd = &blkif->vbd;
	int rc = -EACCES;

	if ((operation != READ) && vbd->readonly)
		goto out;

	if (likely(req->nr_sects)) {
		blkif_sector_t end = req->sector_number + req->nr_sects;

		if (unlikely(end < req->sector_number))
			goto out;
		if (unlikely(end > vbd_sz(vbd)))
			goto out;
	}

	req->dev  = vbd->pdevice;
	req->bdev = vbd->bdev;
	rc = 0;

 out:
	return rc;
}

static void xen_vbd_resize(struct xen_blkif *blkif)
{
	struct xen_vbd *vbd = &blkif->vbd;
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
	unsigned long long new_size = vbd_sz(vbd);

	pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n",
		blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
	pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size);
	vbd->size = new_size;
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		pr_warn(DRV_PFX "Error starting transaction\n");
		return;
	}
	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(vbd));
	if (err) {
		pr_warn(DRV_PFX "Error writing new size\n");
		goto abort;
	}
	/*
	 * Write the current state; we will use this to synchronize
	 * the front-end. If the current state is "connected" the
	 * front-end will get the new size information online.
	 */
	err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
	if (err) {
		pr_warn(DRV_PFX "Error writing the state\n");
		goto abort;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		pr_warn(DRV_PFX "Error ending transaction\n");
	return;
abort:
	xenbus_transaction_end(xbt, 1);
}

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct xen_blkif *blkif)
{
	blkif->waiting_reqs = 1;
	wake_up(&blkif->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}

/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct xen_blkif *blkif)
{
	pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
		 " | ds %4llu | pg: %4u/%4d\n",
		 current->comm, blkif->st_oo_req,
		 blkif->st_rd_req, blkif->st_wr_req,
		 blkif->st_f_req, blkif->st_ds_req,
		 blkif->persistent_gnt_c,
		 xen_blkif_max_pgrants);
	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	blkif->st_rd_req = 0;
	blkif->st_wr_req = 0;
	blkif->st_oo_req = 0;
	blkif->st_ds_req = 0;
}
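
/*
 * Main per-backend kernel thread: waits for ring activity or a timeout,
 * dispatches requests via do_block_io_op(), periodically purges persistent
 * grants and shrinks the free page pool, and releases all resources when
 * asked to stop.
 */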
int xen_blkif_schedule(void *arg)
{
	struct xen_blkif *blkif = arg;
	struct xen_vbd *vbd = &blkif->vbd;
	unsigned long timeout;
	int ret;

	xen_blkif_get(blkif);

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
		if (unlikely(vbd->size != vbd_sz(vbd)))
			xen_vbd_resize(blkif);

		timeout = msecs_to_jiffies(LRU_INTERVAL);

		timeout = wait_event_interruptible_timeout(
			blkif->wq,
			blkif->waiting_reqs || kthread_should_stop(),
			timeout);
		if (timeout == 0)
			goto purge_gnt_list;
		timeout = wait_event_interruptible_timeout(
			blkif->pending_free_wq,
			!list_empty(&blkif->pending_free) ||
			kthread_should_stop(),
			timeout);
		if (timeout == 0)
			goto purge_gnt_list;

		blkif->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		ret = do_block_io_op(blkif);
		if (ret > 0)
			blkif->waiting_reqs = 1;
		if (ret == -EACCES)
			wait_event_interruptible(blkif->shutdown_wq,
						 kthread_should_stop());

purge_gnt_list:
		if (blkif->vbd.feature_gnt_persistent &&
		    time_after(jiffies, blkif->next_lru)) {
			purge_persistent_gnt(blkif);
			blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
		}

		/* Shrink if we have more than xen_blkif_max_buffer_pages */
		shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages);

		if (log_stats && time_after(jiffies, blkif->st_print))
			print_stats(blkif);
	}

	/* Since we are shutting down remove all pages from the buffer */
	shrink_free_pagepool(blkif, 0 /* All */);

	/* Free all persistent grant pages */
	if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
		free_persistent_gnts(blkif, &blkif->persistent_gnts,
			blkif->persistent_gnt_c);

	BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
	blkif->persistent_gnt_c = 0;

	if (log_stats)
		print_stats(blkif);

	blkif->xenblkd = NULL;
	xen_blkif_put(blkif);

	return 0;
}

/*
 * Unmap the grant references, and also remove the M2P over-rides
 * used in the 'pending_req'.
 */
static void xen_blkbk_unmap(struct xen_blkif *blkif,
			    struct grant_page *pages[],
			    int num)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int i, invcount = 0;
	int ret;

	for (i = 0; i < num; i++) {
		if (pages[i]->persistent_gnt != NULL) {
			put_persistent_gnt(blkif, pages[i]->persistent_gnt);
			continue;
		}
		if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
			continue;
		unmap_pages[invcount] = pages[i]->page;
		gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]->page),
				    GNTMAP_host_map, pages[i]->handle);
		pages[i]->handle = BLKBACK_INVALID_HANDLE;
		if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
						invcount);
			BUG_ON(ret);
			put_free_pages(blkif, unmap_pages, invcount);
			invcount = 0;
		}
	}
	if (invcount) {
		ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
		BUG_ON(ret);
		put_free_pages(blkif, unmap_pages, invcount);
	}
}
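
/*
 * Map the grants needed by a request, in batches of up to
 * BLKIF_MAX_SEGMENTS_PER_REQUEST, reusing already-mapped persistent grants
 * where possible and adding new ones to the persistent tree while there is
 * room; everything else is mapped as a regular, per-request grant.
 */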
static int xen_blkbk_map(struct xen_blkif *blkif,
			 struct grant_page *pages[],
			 int num, bool ro)
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt = NULL;
	phys_addr_t addr = 0;
	int i, seg_idx, new_map_idx;
	int segs_to_map = 0;
	int ret = 0;
	int last_map = 0, map_until = 0;
	int use_persistent_gnts;

	use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);

	/*
	 * Fill out preq.nr_sects with proper amount of sectors, and set up
	 * map[..] with the PFN of the page in our domain with the
	 * corresponding grant reference for each page.
	 */
again:
	for (i = map_until; i < num; i++) {
		uint32_t flags;

		if (use_persistent_gnts)
			persistent_gnt = get_persistent_gnt(
				blkif,
				pages[i]->gref);

		if (persistent_gnt) {
			/*
			 * We are using persistent grants and
			 * the grant is already mapped
			 */
			pages[i]->page = persistent_gnt->page;
			pages[i]->persistent_gnt = persistent_gnt;
		} else {
			if (get_free_page(blkif, &pages[i]->page))
				goto out_of_memory;
			addr = vaddr(pages[i]->page);
			pages_to_gnt[segs_to_map] = pages[i]->page;
			pages[i]->persistent_gnt = NULL;
			flags = GNTMAP_host_map;
			if (!use_persistent_gnts && ro)
				flags |= GNTMAP_readonly;
			gnttab_set_map_op(&map[segs_to_map++], addr,
					  flags, pages[i]->gref,
					  blkif->domid);
		}
		map_until = i + 1;
		if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
			break;
	}

	if (segs_to_map) {
		ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
		BUG_ON(ret);
	}

	/*
	 * Now swizzle the MFN in our domain with the MFN from the other domain
	 * so that when we access vaddr(pending_req,i) it has the contents of
	 * the page from the other domain.
	 */
	for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
		if (!pages[seg_idx]->persistent_gnt) {
			/* This is a newly mapped grant */
			BUG_ON(new_map_idx >= segs_to_map);
			if (unlikely(map[new_map_idx].status != 0)) {
				pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
				pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
				ret |= 1;
				goto next;
			}
			pages[seg_idx]->handle = map[new_map_idx].handle;
		} else {
			continue;
		}
		if (use_persistent_gnts &&
		    blkif->persistent_gnt_c < xen_blkif_max_pgrants) {
			/*
			 * We are using persistent grants, the grant is
			 * not mapped but we might have room for it.
			 */
			persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
						 GFP_KERNEL);
			if (!persistent_gnt) {
				/*
				 * If we don't have enough memory to
				 * allocate the persistent_gnt struct
				 * map this grant non-persistently
				 */
				goto next;
			}
			persistent_gnt->gnt = map[new_map_idx].ref;
			persistent_gnt->handle = map[new_map_idx].handle;
			persistent_gnt->page = pages[seg_idx]->page;
			if (add_persistent_gnt(blkif,
					       persistent_gnt)) {
				kfree(persistent_gnt);
				persistent_gnt = NULL;
				goto next;
			}
			pages[seg_idx]->persistent_gnt = persistent_gnt;
			pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
				 persistent_gnt->gnt, blkif->persistent_gnt_c,
				 xen_blkif_max_pgrants);
			goto next;
		}
		if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
			blkif->vbd.overflow_max_grants = 1;
			pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
				 blkif->domid, blkif->vbd.handle);
		}
		/*
		 * We could not map this grant persistently, so use it as
		 * a non-persistent grant.
		 */
next:
		new_map_idx++;
	}
	segs_to_map = 0;
	last_map = map_until;
	if (map_until != num)
		goto again;

	return ret;

out_of_memory:
	pr_alert(DRV_PFX "%s: out of memory\n", __func__);
	put_free_pages(blkif, pages_to_gnt, segs_to_map);
	return -ENOMEM;
}

static int xen_blkbk_map_seg(struct pending_req *pending_req)
{
	int rc;

	rc = xen_blkbk_map(pending_req->blkif, pending_req->segments,
			   pending_req->nr_pages,
			   (pending_req->operation != BLKIF_OP_READ));

	return rc;
}
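
/*
 * For BLKIF_OP_INDIRECT requests the segment descriptors live in separately
 * granted indirect pages: map those pages, copy the segment descriptors out
 * into the request's segment arrays, then unmap them again.
 */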
static int xen_blkbk_parse_indirect(struct blkif_request *req,
				    struct pending_req *pending_req,
				    struct seg_buf seg[],
				    struct phys_req *preq)
{
	struct grant_page **pages = pending_req->indirect_pages;
	struct xen_blkif *blkif = pending_req->blkif;
	int indirect_grefs, rc, n, nseg, i;
	struct blkif_request_segment_aligned *segments = NULL;

	nseg = pending_req->nr_pages;
	indirect_grefs = INDIRECT_PAGES(nseg);
	BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

	for (i = 0; i < indirect_grefs; i++)
		pages[i]->gref = req->u.indirect.indirect_grefs[i];

	rc = xen_blkbk_map(blkif, pages, indirect_grefs, true);
	if (rc)
		goto unmap;

	for (n = 0, i = 0; n < nseg; n++) {
		if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
			/* Map indirect segments */
			if (segments)
				kunmap_atomic(segments);
			segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
		}
		i = n % SEGS_PER_INDIRECT_FRAME;
		pending_req->segments[n]->gref = segments[i].gref;
		seg[n].nsec = segments[i].last_sect -
			segments[i].first_sect + 1;
		seg[n].offset = (segments[i].first_sect << 9);
		if ((segments[i].last_sect >= (PAGE_SIZE >> 9)) ||
		    (segments[i].last_sect < segments[i].first_sect)) {
			rc = -EINVAL;
			goto unmap;
		}
		preq->nr_sects += seg[n].nsec;
	}

unmap:
	if (segments)
		kunmap_atomic(segments);
	xen_blkbk_unmap(blkif, pages, indirect_grefs);
	return rc;
}
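
/*
 * Handle BLKIF_OP_DISCARD by translating it into blkdev_issue_discard() on
 * the backing device, honouring the secure-discard flag when the device
 * supports it, and put the response on the ring directly.
 */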
static int dispatch_discard_io(struct xen_blkif *blkif,
				struct blkif_request *req)
{
	int err = 0;
	int status = BLKIF_RSP_OKAY;
	struct block_device *bdev = blkif->vbd.bdev;
	unsigned long secure;
	struct phys_req preq;

	preq.sector_number = req->u.discard.sector_number;
	preq.nr_sects      = req->u.discard.nr_sectors;

	err = xen_vbd_translate(&preq, blkif, WRITE);
	if (err) {
		pr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\n",
			preq.sector_number,
			preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
		goto fail_response;
	}
	blkif->st_ds_req++;

	xen_blkif_get(blkif);
	secure = (blkif->vbd.discard_secure &&
		 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
		 BLKDEV_DISCARD_SECURE : 0;

	err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
				   req->u.discard.nr_sectors,
				   GFP_KERNEL, secure);
fail_response:
	if (err == -EOPNOTSUPP) {
		pr_debug(DRV_PFX "discard op failed, not supported\n");
		status = BLKIF_RSP_EOPNOTSUPP;
	} else if (err)
		status = BLKIF_RSP_ERROR;

	make_response(blkif, req->u.discard.id, req->operation, status);
	xen_blkif_put(blkif);
	return err;
}

static int dispatch_other_io(struct xen_blkif *blkif,
			     struct blkif_request *req,
			     struct pending_req *pending_req)
{
	free_req(blkif, pending_req);
	make_response(blkif, req->u.other.id, req->operation,
		      BLKIF_RSP_EOPNOTSUPP);
	return -EIO;
}
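
/*
 * Wait until every in-flight request on this backend has completed (the
 * refcount drops back to its idle value of two); used so that a
 * WRITE_BARRIER is only issued after the I/O that preceded it.
 */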
static void xen_blk_drain_io(struct xen_blkif *blkif)
{
	atomic_set(&blkif->drain, 1);
	do {
		/* The initial value is one, and one refcnt taken at the
		 * start of the xen_blkif_schedule thread. */
		if (atomic_read(&blkif->refcnt) <= 2)
			break;
		wait_for_completion_interruptible_timeout(
				&blkif->drain_complete, HZ);

		if (!atomic_read(&blkif->drain))
			break;
	} while (!kthread_should_stop());
	atomic_set(&blkif->drain, 0);
}

/*
 * Completion callback on the bio's. Called as bh->b_end_io()
 */
static void __end_block_io_op(struct pending_req *pending_req, int error)
{
	/* An error fails the entire request. */
	if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
	    (error == -EOPNOTSUPP)) {
		pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
		    (error == -EOPNOTSUPP)) {
		pr_debug(DRV_PFX "write barrier op failed, not supported\n");
		xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
		pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
			 " error=%d\n", error);
		pending_req->status = BLKIF_RSP_ERROR;
	}

	/*
	 * If all of the bio's have completed it is time to unmap
	 * the grant references associated with 'request' and provide
	 * the proper response on the ring.
	 */
	if (atomic_dec_and_test(&pending_req->pendcnt)) {
		xen_blkbk_unmap(pending_req->blkif,
				pending_req->segments,
				pending_req->nr_pages);
		make_response(pending_req->blkif, pending_req->id,
			      pending_req->operation, pending_req->status);
		xen_blkif_put(pending_req->blkif);
		if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
			if (atomic_read(&pending_req->blkif->drain))
				complete(&pending_req->blkif->drain_complete);
		}
		free_req(pending_req->blkif, pending_req);
	}
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio, int error)
{
	__end_block_io_op(bio->bi_private, error);
	bio_put(bio);
}

/*
 * Function to copy the 'struct blkif_request' from the ring buffer
 * (which has the sectors we want, number of them, grant references, etc),
 * and transmute it to the block API to hand it over to the proper block disk.
 */
static int
__do_block_io_op(struct xen_blkif *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	struct blkif_request req;
	struct pending_req *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

	if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
		rc = blk_rings->common.rsp_prod_pvt;
		pr_warn(DRV_PFX "Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
			rp, rc, rp - rc, blkif->vbd.pdevice);
		return -EACCES;
	}

	while (rc != rp) {

		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;

		if (kthread_should_stop()) {
			more_to_do = 1;
			break;
		}

		pending_req = alloc_req(blkif);
		if (NULL == pending_req) {
			blkif->st_oo_req++;
			more_to_do = 1;
			break;
		}

		switch (blkif->blk_protocol) {
		case BLKIF_PROTOCOL_NATIVE:
			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
			break;
		case BLKIF_PROTOCOL_X86_32:
			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
			break;
		case BLKIF_PROTOCOL_X86_64:
			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
			break;
		default:
			BUG();
		}
		blk_rings->common.req_cons = ++rc; /* before make_response() */

		/* Apply all sanity checks to /private copy/ of request. */
		barrier();

		switch (req.operation) {
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
		case BLKIF_OP_WRITE_BARRIER:
		case BLKIF_OP_FLUSH_DISKCACHE:
		case BLKIF_OP_INDIRECT:
			if (dispatch_rw_block_io(blkif, &req, pending_req))
				goto done;
			break;
		case BLKIF_OP_DISCARD:
			free_req(blkif, pending_req);
			if (dispatch_discard_io(blkif, &req))
				goto done;
			break;
		default:
			if (dispatch_other_io(blkif, &req, pending_req))
				goto done;
			break;
		}

		/* Yield point for this unbounded loop. */
		cond_resched();
	}
done:
	return more_to_do;
}

static int
do_block_io_op(struct xen_blkif *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int more_to_do;

	do {
		more_to_do = __do_block_io_op(blkif);
		if (more_to_do)
			break;

		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
	} while (more_to_do);

	return more_to_do;
}

/*
 * Transmute the 'struct blkif_request' into a proper 'struct bio'
 * and call 'submit_bio' to pass it to the underlying storage.
 */
static int dispatch_rw_block_io(struct xen_blkif *blkif,
				struct blkif_request *req,
				struct pending_req *pending_req)
{
	struct phys_req preq;
	struct seg_buf *seg = pending_req->seg;
	unsigned int nseg;
	struct bio *bio = NULL;
	struct bio **biolist = pending_req->biolist;
	int i, nbio = 0;
	int operation;
	struct blk_plug plug;
	bool drain = false;
	struct grant_page **pages = pending_req->segments;
	unsigned short req_operation;

	req_operation = req->operation == BLKIF_OP_INDIRECT ?
			req->u.indirect.indirect_op : req->operation;
	if ((req->operation == BLKIF_OP_INDIRECT) &&
	    (req_operation != BLKIF_OP_READ) &&
	    (req_operation != BLKIF_OP_WRITE)) {
		pr_debug(DRV_PFX "Invalid indirect operation (%u)\n",
			 req_operation);
		goto fail_response;
	}

	switch (req_operation) {
	case BLKIF_OP_READ:
		blkif->st_rd_req++;
		operation = READ;
		break;
	case BLKIF_OP_WRITE:
		blkif->st_wr_req++;
		operation = WRITE_ODIRECT;
		break;
	case BLKIF_OP_WRITE_BARRIER:
		drain = true;
		/* fall through to the flush handling */
	case BLKIF_OP_FLUSH_DISKCACHE:
		blkif->st_f_req++;
		operation = WRITE_FLUSH;
		break;
	default:
		operation = 0; /* make gcc happy */
		goto fail_response;
		break;
	}

	/* Check that the number of segments is sane. */
	nseg = req->operation == BLKIF_OP_INDIRECT ?
	       req->u.indirect.nr_segments : req->u.rw.nr_segments;

	if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
	    unlikely((req->operation != BLKIF_OP_INDIRECT) &&
		     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
	    unlikely((req->operation == BLKIF_OP_INDIRECT) &&
		     (nseg > MAX_INDIRECT_SEGMENTS))) {
		pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
			 nseg);
		/* Haven't submitted any bio's yet. */
		goto fail_response;
	}

	preq.nr_sects      = 0;

	pending_req->blkif     = blkif;
	pending_req->id        = req->u.rw.id;
	pending_req->operation = req_operation;
	pending_req->status    = BLKIF_RSP_OKAY;
	pending_req->nr_pages  = nseg;

	if (req->operation != BLKIF_OP_INDIRECT) {
		preq.dev               = req->u.rw.handle;
		preq.sector_number     = req->u.rw.sector_number;
		for (i = 0; i < nseg; i++) {
			pages[i]->gref = req->u.rw.seg[i].gref;
			seg[i].nsec = req->u.rw.seg[i].last_sect -
				req->u.rw.seg[i].first_sect + 1;
			seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
			if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
			    (req->u.rw.seg[i].last_sect <
			     req->u.rw.seg[i].first_sect))
				goto fail_response;
			preq.nr_sects += seg[i].nsec;
		}
	} else {
		preq.dev               = req->u.indirect.handle;
		preq.sector_number     = req->u.indirect.sector_number;
		if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
			goto fail_response;
	}

	if (xen_vbd_translate(&preq, blkif, operation) != 0) {
		pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
			 operation == READ ? "read" : "write",
			 preq.sector_number,
			 preq.sector_number + preq.nr_sects,
			 blkif->vbd.pdevice);
		goto fail_response;
	}

	/*
	 * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
	 * is set there.
	 */
	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number|(int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			pr_debug(DRV_PFX "Misaligned I/O request from domain %d\n",
				 blkif->domid);
			goto fail_response;
		}
	}

	/* Wait on all outstanding I/O's and once that has been completed
	 * issue the WRITE_FLUSH.
	 */
	if (drain)
		xen_blk_drain_io(pending_req->blkif);

	/*
	 * If we have failed at this point, we need to undo the M2P override,
	 * set gnttab_set_unmap_op on all of the grant references and perform
	 * the hypercall to unmap the grants - that is all done in
	 * xen_blkbk_unmap.
	 */
	if (xen_blkbk_map_seg(pending_req))
		goto fail_flush;

	/*
	 * This corresponding xen_blkif_put is done in __end_block_io_op, or
	 * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
	 */
	xen_blkif_get(blkif);

	for (i = 0; i < nseg; i++) {
		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     pages[i]->page,
				     seg[i].nsec << 9,
				     seg[i].offset) == 0)) {

			bio = bio_alloc(GFP_KERNEL, nseg-i);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

			biolist[nbio++] = bio;
			bio->bi_bdev    = preq.bdev;
			bio->bi_private = pending_req;
			bio->bi_end_io  = end_block_io_op;
			bio->bi_sector  = preq.sector_number;
		}

		preq.sector_number += seg[i].nsec;
	}

	/* This will be hit if the operation was a flush or discard. */
	if (!bio) {
		BUG_ON(operation != WRITE_FLUSH);

		bio = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

		biolist[nbio++] = bio;
		bio->bi_bdev    = preq.bdev;
		bio->bi_private = pending_req;
		bio->bi_end_io  = end_block_io_op;
	}

	atomic_set(&pending_req->pendcnt, nbio);
	blk_start_plug(&plug);

	for (i = 0; i < nbio; i++)
		submit_bio(operation, biolist[i]);

	/* Let the I/Os go.. */
	blk_finish_plug(&plug);

	if (operation == READ)
		blkif->st_rd_sect += preq.nr_sects;
	else if (operation & WRITE)
		blkif->st_wr_sect += preq.nr_sects;

	return 0;

 fail_flush:
	xen_blkbk_unmap(blkif, pending_req->segments,
			pending_req->nr_pages);
 fail_response:
	/* Haven't submitted any bio's yet. */
	make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
	free_req(blkif, pending_req);
	msleep(1); /* back off a bit */
	return -EIO;

 fail_put_bio:
	for (i = 0; i < nbio; i++)
		bio_put(biolist[i]);
	atomic_set(&pending_req->pendcnt, 1);
	__end_block_io_op(pending_req, -EINVAL);
	msleep(1); /* back off a bit */
	return -EIO;
}

/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct xen_blkif *blkif, u64 id,
			  unsigned short op, int st)
{
	struct blkif_response  resp;
	unsigned long     flags;
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int notify;

	resp.id        = id;
	resp.operation = op;
	resp.status    = st;

	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
	/* Place on the response ring for the relevant domain. */
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_32:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_64:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	default:
		BUG();
	}
	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
	if (notify)
		notify_remote_via_irq(blkif->irq);
}
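
/*
 * Module initialisation: only runs in a Xen domain; sets up the blkif
 * interface and registers the xenbus backend driver.
 */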
static int __init xen_blkif_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	rc = xen_blkif_interface_init();
	if (rc)
		goto failed_init;

	rc = xen_blkif_xenbus_init();
	if (rc)
		goto failed_init;

 failed_init:
	return rc;
}

module_init(xen_blkif_init);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");