blkback.c

/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/bitmap.h>

#include <xen/events.h>
#include <xen/page.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#include "common.h"

/*
 * Maximum number of unused free pages to keep in the internal buffer.
 * Setting this to a value too low will reduce memory used in each backend,
 * but can have a performance penalty.
 *
 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but it can
 * be set to a lower value that might degrade performance on some intensive
 * IO workloads.
 */
static int xen_blkif_max_buffer_pages = 1024;
module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
		 "Maximum number of free pages to keep in each block backend buffer");

/*
 * Maximum number of grants to map persistently in blkback. For maximum
 * performance this should be the total number of grants that can be used
 * to fill the ring, but since this might become too high, especially with
 * the use of indirect descriptors, we set it to a value that provides good
 * performance without using too much memory.
 *
 * When the list of persistent grants is full we clean it up using an LRU
 * algorithm.
 */
static int xen_blkif_max_pgrants = 1056;
module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
MODULE_PARM_DESC(max_persistent_grants,
		 "Maximum number of grants to map persistently");

/*
 * The LRU mechanism to clean the lists of persistent grants needs to
 * be executed periodically. The time interval between consecutive executions
 * of the purge mechanism is set in ms.
 */
#define LRU_INTERVAL 100

/*
 * When the persistent grants list is full we will remove unused grants
 * from the list. The percent number of grants to be removed at each LRU
 * execution.
 */
#define LRU_PERCENT_CLEAN 5

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, int, 0644);

#define BLKBACK_INVALID_HANDLE (~0)

/* Number of free pages to remove on each call to free_xenballooned_pages */
#define NUM_BATCH_FREE_PAGES 10
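
/*
 * Take a page from the backend's pool of free (ballooned) pages, falling
 * back to allocating a fresh ballooned page when the pool is empty.
 */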
static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
{
	unsigned long flags;

	spin_lock_irqsave(&blkif->free_pages_lock, flags);
	if (list_empty(&blkif->free_pages)) {
		BUG_ON(blkif->free_pages_num != 0);
		spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
		return alloc_xenballooned_pages(1, page, false);
	}
	BUG_ON(blkif->free_pages_num == 0);
	page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
	list_del(&page[0]->lru);
	blkif->free_pages_num--;
	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);

	return 0;
}
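
/* Return 'num' pages to the backend's free page pool. */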
static inline void put_free_pages(struct xen_blkif *blkif, struct page **page,
				  int num)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&blkif->free_pages_lock, flags);
	for (i = 0; i < num; i++)
		list_add(&page[i]->lru, &blkif->free_pages);
	blkif->free_pages_num += num;
	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
}
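
/*
 * Release ballooned pages back until at most 'num' pages remain in the
 * backend's free page pool.
 */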
static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
{
	/* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
	struct page *page[NUM_BATCH_FREE_PAGES];
	unsigned int num_pages = 0;
	unsigned long flags;

	spin_lock_irqsave(&blkif->free_pages_lock, flags);
	while (blkif->free_pages_num > num) {
		BUG_ON(list_empty(&blkif->free_pages));
		page[num_pages] = list_first_entry(&blkif->free_pages,
						   struct page, lru);
		list_del(&page[num_pages]->lru);
		blkif->free_pages_num--;
		if (++num_pages == NUM_BATCH_FREE_PAGES) {
			spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
			free_xenballooned_pages(num_pages, page);
			spin_lock_irqsave(&blkif->free_pages_lock, flags);
			num_pages = 0;
		}
	}
	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
	if (num_pages != 0)
		free_xenballooned_pages(num_pages, page);
}

#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))

static int do_block_io_op(struct xen_blkif *blkif);
static int dispatch_rw_block_io(struct xen_blkif *blkif,
				struct blkif_request *req,
				struct pending_req *pending_req);
static void make_response(struct xen_blkif *blkif, u64 id,
			  unsigned short op, int st);

#define foreach_grant_safe(pos, n, rbtree, node) \
	for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
	     &(pos)->node != NULL; \
	     (pos) = container_of(n, typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)

/*
 * We don't need locking around the persistent grant helpers
 * because blkback uses a single thread for each backend, so we
 * can be sure that these functions will never be called recursively.
 *
 * The only exception to that is put_persistent_gnt, which can be called
 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
 * bit operations to modify the flags of a persistent grant and to count
 * the number of used grants.
 */
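
/*
 * Insert a newly mapped grant into the red-black tree of persistent grants,
 * keyed by grant reference. Returns -EBUSY when the tree is already at
 * xen_blkif_max_pgrants and -EINVAL for a duplicate gref.
 */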
static int add_persistent_gnt(struct xen_blkif *blkif,
			      struct persistent_gnt *persistent_gnt)
{
	struct rb_node **new = NULL, *parent = NULL;
	struct persistent_gnt *this;

	if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) {
		if (!blkif->vbd.overflow_max_grants)
			blkif->vbd.overflow_max_grants = 1;
		return -EBUSY;
	}
	/* Figure out where to put new node */
	new = &blkif->persistent_gnts.rb_node;
	while (*new) {
		this = container_of(*new, struct persistent_gnt, node);

		parent = *new;
		if (persistent_gnt->gnt < this->gnt)
			new = &((*new)->rb_left);
		else if (persistent_gnt->gnt > this->gnt)
			new = &((*new)->rb_right);
		else {
			pr_alert_ratelimited(DRV_PFX " trying to add a gref that's already in the tree\n");
			return -EINVAL;
		}
	}

	bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
	set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
	/* Add new node and rebalance tree. */
	rb_link_node(&(persistent_gnt->node), parent, new);
	rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts);
	blkif->persistent_gnt_c++;
	atomic_inc(&blkif->persistent_gnt_in_use);
	return 0;
}
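
/*
 * Look up a grant reference in the persistent grant tree. On success the
 * grant is marked active and the in-use counter is incremented; NULL is
 * returned if the gref is not in the tree or is already in use.
 */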
static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif,
						 grant_ref_t gref)
{
	struct persistent_gnt *data;
	struct rb_node *node = NULL;

	node = blkif->persistent_gnts.rb_node;
	while (node) {
		data = container_of(node, struct persistent_gnt, node);

		if (gref < data->gnt)
			node = node->rb_left;
		else if (gref > data->gnt)
			node = node->rb_right;
		else {
			if (test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
				pr_alert_ratelimited(DRV_PFX " requesting a grant already in use\n");
				return NULL;
			}
			set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
			atomic_inc(&blkif->persistent_gnt_in_use);
			return data;
		}
	}
	return NULL;
}

static void put_persistent_gnt(struct xen_blkif *blkif,
			       struct persistent_gnt *persistent_gnt)
{
	if (!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
		pr_alert_ratelimited(DRV_PFX " freeing a grant already unused");
	set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
	clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
	atomic_dec(&blkif->persistent_gnt_in_use);
}
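
/*
 * Unmap and free every persistent grant in the tree, batching the unmap
 * operations in groups of BLKIF_MAX_SEGMENTS_PER_REQUEST.
 */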
static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
				 unsigned int num)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
	struct rb_node *n;
	int ret = 0;
	int segs_to_unmap = 0;

	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);
		gnttab_set_unmap_op(&unmap[segs_to_unmap],
			(unsigned long) pfn_to_kaddr(page_to_pfn(
				persistent_gnt->page)),
			GNTMAP_host_map,
			persistent_gnt->handle);

		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
		    !rb_next(&persistent_gnt->node)) {
			ret = gnttab_unmap_refs(unmap, NULL, pages,
				segs_to_unmap);
			BUG_ON(ret);
			put_free_pages(blkif, pages, segs_to_unmap);
			segs_to_unmap = 0;
		}

		rb_erase(&persistent_gnt->node, root);
		kfree(persistent_gnt);
		num--;
	}
	BUG_ON(num != 0);
}
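
/*
 * Deferred work: unmap and free the grants that purge_persistent_gnt()
 * collected on persistent_purge_list, returning their pages to the pool.
 */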
static void unmap_purged_grants(struct work_struct *work)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
	int ret, segs_to_unmap = 0;
	struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);

	while (!list_empty(&blkif->persistent_purge_list)) {
		persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
						  struct persistent_gnt,
						  remove_node);
		list_del(&persistent_gnt->remove_node);

		gnttab_set_unmap_op(&unmap[segs_to_unmap],
			vaddr(persistent_gnt->page),
			GNTMAP_host_map,
			persistent_gnt->handle);

		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
			ret = gnttab_unmap_refs(unmap, NULL, pages,
				segs_to_unmap);
			BUG_ON(ret);
			put_free_pages(blkif, pages, segs_to_unmap);
			segs_to_unmap = 0;
		}
		kfree(persistent_gnt);
	}
	if (segs_to_unmap > 0) {
		ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
		BUG_ON(ret);
		put_free_pages(blkif, pages, segs_to_unmap);
	}
}
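
/*
 * LRU pass over the persistent grant tree: detach grants that have not been
 * used since the last purge (and, if that is not enough, grants that were
 * used but are currently idle) and hand them to unmap_purged_grants() via
 * a work item.
 */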
static void purge_persistent_gnt(struct xen_blkif *blkif)
{
	struct persistent_gnt *persistent_gnt;
	struct rb_node *n;
	unsigned int num_clean, total;
	bool scan_used = false, clean_used = false;
	struct rb_root *root;

	if (blkif->persistent_gnt_c < xen_blkif_max_pgrants ||
	    (blkif->persistent_gnt_c == xen_blkif_max_pgrants &&
	    !blkif->vbd.overflow_max_grants)) {
		return;
	}

	if (work_pending(&blkif->persistent_purge_work)) {
		pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n");
		return;
	}

	num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
	num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
	num_clean = min(blkif->persistent_gnt_c, num_clean);
	if ((num_clean == 0) ||
	    (num_clean > (blkif->persistent_gnt_c - atomic_read(&blkif->persistent_gnt_in_use))))
		return;

	/*
	 * At this point, we can assure that there will be no calls
	 * to get_persistent_grant (because we are executing this code from
	 * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
	 * which means that the number of currently used grants will go down,
	 * but never up, so we will always be able to remove the requested
	 * number of grants.
	 */

	total = num_clean;

	pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);

	INIT_LIST_HEAD(&blkif->persistent_purge_list);
	root = &blkif->persistent_gnts;
purge_list:
	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);

		if (clean_used) {
			clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
			continue;
		}

		if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
			continue;
		if (!scan_used &&
		    (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
			continue;

		rb_erase(&persistent_gnt->node, root);
		list_add(&persistent_gnt->remove_node,
			 &blkif->persistent_purge_list);
		if (--num_clean == 0)
			goto finished;
	}
	/*
	 * If we get here it means we also need to start cleaning
	 * grants that were used since last purge in order to cope
	 * with the requested num
	 */
	if (!scan_used && !clean_used) {
		pr_debug(DRV_PFX "Still missing %u purged frames\n", num_clean);
		scan_used = true;
		goto purge_list;
	}
finished:
	if (!clean_used) {
		pr_debug(DRV_PFX "Finished scanning for grants to clean, removing used flag\n");
		clean_used = true;
		goto purge_list;
	}

	blkif->persistent_gnt_c -= (total - num_clean);
	blkif->vbd.overflow_max_grants = 0;

	/* We can defer this work */
	INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants);
	schedule_work(&blkif->persistent_purge_work);
	pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
	return;
}

/*
 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
 */
static struct pending_req *alloc_req(struct xen_blkif *blkif)
{
	struct pending_req *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&blkif->pending_free_lock, flags);
	if (!list_empty(&blkif->pending_free)) {
		req = list_entry(blkif->pending_free.next, struct pending_req,
				 free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
	return req;
}

/*
 * Return the 'pending_req' structure back to the freepool. We also
 * wake up the thread if it was waiting for a free page.
 */
static void free_req(struct xen_blkif *blkif, struct pending_req *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&blkif->pending_free_lock, flags);
	was_empty = list_empty(&blkif->pending_free);
	list_add(&req->free_list, &blkif->pending_free);
	spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
	if (was_empty)
		wake_up(&blkif->pending_free_wq);
}

/*
 * Routines for managing virtual block devices (vbds).
 */
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
			     int operation)
{
	struct xen_vbd *vbd = &blkif->vbd;
	int rc = -EACCES;

	if ((operation != READ) && vbd->readonly)
		goto out;

	if (likely(req->nr_sects)) {
		blkif_sector_t end = req->sector_number + req->nr_sects;

		if (unlikely(end < req->sector_number))
			goto out;
		if (unlikely(end > vbd_sz(vbd)))
			goto out;
	}

	req->dev  = vbd->pdevice;
	req->bdev = vbd->bdev;
	rc = 0;

out:
	return rc;
}

static void xen_vbd_resize(struct xen_blkif *blkif)
{
	struct xen_vbd *vbd = &blkif->vbd;
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
	unsigned long long new_size = vbd_sz(vbd);

	pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n",
		blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
	pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size);
	vbd->size = new_size;
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		pr_warn(DRV_PFX "Error starting transaction");
		return;
	}
	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(vbd));
	if (err) {
		pr_warn(DRV_PFX "Error writing new size");
		goto abort;
	}
	/*
	 * Write the current state; we will use this to synchronize
	 * the front-end. If the current state is "connected" the
	 * front-end will get the new size information online.
	 */
	err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
	if (err) {
		pr_warn(DRV_PFX "Error writing the state");
		goto abort;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		pr_warn(DRV_PFX "Error ending transaction");
	return;
abort:
	xenbus_transaction_end(xbt, 1);
}

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct xen_blkif *blkif)
{
	blkif->waiting_reqs = 1;
	wake_up(&blkif->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}

/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct xen_blkif *blkif)
{
	pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
		 " | ds %4llu | pg: %4u/%4d\n",
		 current->comm, blkif->st_oo_req,
		 blkif->st_rd_req, blkif->st_wr_req,
		 blkif->st_f_req, blkif->st_ds_req,
		 blkif->persistent_gnt_c,
		 xen_blkif_max_pgrants);
	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	blkif->st_rd_req = 0;
	blkif->st_wr_req = 0;
	blkif->st_oo_req = 0;
	blkif->st_ds_req = 0;
}
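
/*
 * Main loop of the per-VBD kernel thread: wait for ring notifications,
 * dispatch requests, and periodically purge persistent grants and shrink
 * the free page pool.
 */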
int xen_blkif_schedule(void *arg)
{
	struct xen_blkif *blkif = arg;
	struct xen_vbd *vbd = &blkif->vbd;
	unsigned long timeout;
	int ret;

	xen_blkif_get(blkif);

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
		if (unlikely(vbd->size != vbd_sz(vbd)))
			xen_vbd_resize(blkif);

		timeout = msecs_to_jiffies(LRU_INTERVAL);

		timeout = wait_event_interruptible_timeout(
			blkif->wq,
			blkif->waiting_reqs || kthread_should_stop(),
			timeout);
		if (timeout == 0)
			goto purge_gnt_list;
		timeout = wait_event_interruptible_timeout(
			blkif->pending_free_wq,
			!list_empty(&blkif->pending_free) ||
			kthread_should_stop(),
			timeout);
		if (timeout == 0)
			goto purge_gnt_list;

		blkif->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		ret = do_block_io_op(blkif);
		if (ret > 0)
			blkif->waiting_reqs = 1;
		if (ret == -EACCES)
			wait_event_interruptible(blkif->shutdown_wq,
						 kthread_should_stop());

purge_gnt_list:
		if (blkif->vbd.feature_gnt_persistent &&
		    time_after(jiffies, blkif->next_lru)) {
			purge_persistent_gnt(blkif);
			blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
		}

		/* Shrink if we have more than xen_blkif_max_buffer_pages */
		shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages);

		if (log_stats && time_after(jiffies, blkif->st_print))
			print_stats(blkif);
	}

	/* Since we are shutting down remove all pages from the buffer */
	shrink_free_pagepool(blkif, 0 /* All */);

	/* Free all persistent grant pages */
	if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
		free_persistent_gnts(blkif, &blkif->persistent_gnts,
			blkif->persistent_gnt_c);

	BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
	blkif->persistent_gnt_c = 0;

	if (log_stats)
		print_stats(blkif);

	blkif->xenblkd = NULL;
	xen_blkif_put(blkif);

	return 0;
}

/*
 * Unmap the grant references, and also remove the M2P over-rides
 * used in the 'pending_req'.
 */
static void xen_blkbk_unmap(struct xen_blkif *blkif,
			    struct grant_page *pages[],
			    int num)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int i, invcount = 0;
	int ret;

	for (i = 0; i < num; i++) {
		if (pages[i]->persistent_gnt != NULL) {
			put_persistent_gnt(blkif, pages[i]->persistent_gnt);
			continue;
		}
		if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
			continue;
		unmap_pages[invcount] = pages[i]->page;
		gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]->page),
				    GNTMAP_host_map, pages[i]->handle);
		pages[i]->handle = BLKBACK_INVALID_HANDLE;
		if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
						invcount);
			BUG_ON(ret);
			put_free_pages(blkif, unmap_pages, invcount);
			invcount = 0;
		}
	}
	if (invcount) {
		ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
		BUG_ON(ret);
		put_free_pages(blkif, unmap_pages, invcount);
	}
}
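
/*
 * Map the grant references of 'pages'. Grants found in the persistent tree
 * are reused as-is; the rest are mapped in batches of
 * BLKIF_MAX_SEGMENTS_PER_REQUEST and, when there is room, added to the
 * persistent tree. Returns non-zero if any grant failed to map.
 */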
static int xen_blkbk_map(struct xen_blkif *blkif,
			 struct grant_page *pages[],
			 int num, bool ro)
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt = NULL;
	phys_addr_t addr = 0;
	int i, seg_idx, new_map_idx;
	int segs_to_map = 0;
	int ret = 0;
	int last_map = 0, map_until = 0;
	int use_persistent_gnts;

	use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);

	/*
	 * Fill out preq.nr_sects with proper amount of sectors, and setup
	 * assign map[..] with the PFN of the page in our domain with the
	 * corresponding grant reference for each page.
	 */
again:
	for (i = map_until; i < num; i++) {
		uint32_t flags;

		if (use_persistent_gnts)
			persistent_gnt = get_persistent_gnt(
				blkif,
				pages[i]->gref);

		if (persistent_gnt) {
			/*
			 * We are using persistent grants and
			 * the grant is already mapped
			 */
			pages[i]->page = persistent_gnt->page;
			pages[i]->persistent_gnt = persistent_gnt;
		} else {
			if (get_free_page(blkif, &pages[i]->page))
				goto out_of_memory;
			addr = vaddr(pages[i]->page);
			pages_to_gnt[segs_to_map] = pages[i]->page;
			pages[i]->persistent_gnt = NULL;
			flags = GNTMAP_host_map;
			if (!use_persistent_gnts && ro)
				flags |= GNTMAP_readonly;
			gnttab_set_map_op(&map[segs_to_map++], addr,
					  flags, pages[i]->gref,
					  blkif->domid);
		}
		map_until = i + 1;
		if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
			break;
	}

	if (segs_to_map) {
		ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
		BUG_ON(ret);
	}

	/*
	 * Now swizzle the MFN in our domain with the MFN from the other domain
	 * so that when we access vaddr(pending_req,i) it has the contents of
	 * the page from the other domain.
	 */
	for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
		if (!pages[seg_idx]->persistent_gnt) {
			/* This is a newly mapped grant */
			BUG_ON(new_map_idx >= segs_to_map);
			if (unlikely(map[new_map_idx].status != 0)) {
				pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
				pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
				ret |= 1;
				goto next;
			}
			pages[seg_idx]->handle = map[new_map_idx].handle;
		} else {
			continue;
		}
		if (use_persistent_gnts &&
		    blkif->persistent_gnt_c < xen_blkif_max_pgrants) {
			/*
			 * We are using persistent grants, the grant is
			 * not mapped but we might have room for it.
			 */
			persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
						 GFP_KERNEL);
			if (!persistent_gnt) {
				/*
				 * If we don't have enough memory to
				 * allocate the persistent_gnt struct
				 * map this grant non-persistently
				 */
				goto next;
			}
			persistent_gnt->gnt = map[new_map_idx].ref;
			persistent_gnt->handle = map[new_map_idx].handle;
			persistent_gnt->page = pages[seg_idx]->page;
			if (add_persistent_gnt(blkif,
					       persistent_gnt)) {
				kfree(persistent_gnt);
				persistent_gnt = NULL;
				goto next;
			}
			pages[seg_idx]->persistent_gnt = persistent_gnt;
			pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
				 persistent_gnt->gnt, blkif->persistent_gnt_c,
				 xen_blkif_max_pgrants);
			goto next;
		}
		if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
			blkif->vbd.overflow_max_grants = 1;
			pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
				 blkif->domid, blkif->vbd.handle);
		}
		/*
		 * We could not map this grant persistently, so use it as
		 * a non-persistent grant.
		 */
next:
		new_map_idx++;
	}
	segs_to_map = 0;
	last_map = map_until;
	if (map_until != num)
		goto again;

	return ret;

out_of_memory:
	pr_alert(DRV_PFX "%s: out of memory\n", __func__);
	put_free_pages(blkif, pages_to_gnt, segs_to_map);
	return -ENOMEM;
}

static int xen_blkbk_map_seg(struct pending_req *pending_req)
{
	int rc;

	rc = xen_blkbk_map(pending_req->blkif, pending_req->segments,
			   pending_req->nr_pages,
			   (pending_req->operation != BLKIF_OP_READ));

	return rc;
}
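
/*
 * Map the indirect descriptor pages of an indirect request and copy each
 * segment's gref, sector count and offset into 'seg', validating the
 * first/last sector numbers along the way.
 */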
static int xen_blkbk_parse_indirect(struct blkif_request *req,
				    struct pending_req *pending_req,
				    struct seg_buf seg[],
				    struct phys_req *preq)
{
	struct grant_page **pages = pending_req->indirect_pages;
	struct xen_blkif *blkif = pending_req->blkif;
	int indirect_grefs, rc, n, nseg, i;
	struct blkif_request_segment_aligned *segments = NULL;

	nseg = pending_req->nr_pages;
	indirect_grefs = INDIRECT_PAGES(nseg);
	BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

	for (i = 0; i < indirect_grefs; i++)
		pages[i]->gref = req->u.indirect.indirect_grefs[i];

	rc = xen_blkbk_map(blkif, pages, indirect_grefs, true);
	if (rc)
		goto unmap;

	for (n = 0, i = 0; n < nseg; n++) {
		if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
			/* Map indirect segments */
			if (segments)
				kunmap_atomic(segments);
			segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
		}
		i = n % SEGS_PER_INDIRECT_FRAME;
		pending_req->segments[n]->gref = segments[i].gref;
		seg[n].nsec = segments[i].last_sect -
			segments[i].first_sect + 1;
		seg[n].offset = (segments[i].first_sect << 9);
		if ((segments[i].last_sect >= (PAGE_SIZE >> 9)) ||
		    (segments[i].last_sect < segments[i].first_sect)) {
			rc = -EINVAL;
			goto unmap;
		}
		preq->nr_sects += seg[n].nsec;
	}

unmap:
	if (segments)
		kunmap_atomic(segments);
	xen_blkbk_unmap(blkif, pages, indirect_grefs);
	return rc;
}
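
/* Handle a BLKIF_OP_DISCARD request by forwarding it to blkdev_issue_discard(). */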
static int dispatch_discard_io(struct xen_blkif *blkif,
			       struct blkif_request *req)
{
	int err = 0;
	int status = BLKIF_RSP_OKAY;
	struct block_device *bdev = blkif->vbd.bdev;
	unsigned long secure;
	struct phys_req preq;

	xen_blkif_get(blkif);

	preq.sector_number = req->u.discard.sector_number;
	preq.nr_sects      = req->u.discard.nr_sectors;

	err = xen_vbd_translate(&preq, blkif, WRITE);
	if (err) {
		pr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\n",
			preq.sector_number,
			preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
		goto fail_response;
	}
	blkif->st_ds_req++;

	secure = (blkif->vbd.discard_secure &&
		 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
		 BLKDEV_DISCARD_SECURE : 0;

	err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
				   req->u.discard.nr_sectors,
				   GFP_KERNEL, secure);
fail_response:
	if (err == -EOPNOTSUPP) {
		pr_debug(DRV_PFX "discard op failed, not supported\n");
		status = BLKIF_RSP_EOPNOTSUPP;
	} else if (err)
		status = BLKIF_RSP_ERROR;

	make_response(blkif, req->u.discard.id, req->operation, status);
	xen_blkif_put(blkif);
	return err;
}

static int dispatch_other_io(struct xen_blkif *blkif,
			     struct blkif_request *req,
			     struct pending_req *pending_req)
{
	free_req(blkif, pending_req);
	make_response(blkif, req->u.other.id, req->operation,
		      BLKIF_RSP_EOPNOTSUPP);
	return -EIO;
}
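
/*
 * Wait until all requests in flight on this backend have completed; used
 * before processing a WRITE_BARRIER.
 */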
static void xen_blk_drain_io(struct xen_blkif *blkif)
{
	atomic_set(&blkif->drain, 1);
	do {
		/* The initial value is one, and one refcnt taken at the
		 * start of the xen_blkif_schedule thread. */
		if (atomic_read(&blkif->refcnt) <= 2)
			break;
		wait_for_completion_interruptible_timeout(
				&blkif->drain_complete, HZ);

		if (!atomic_read(&blkif->drain))
			break;
	} while (!kthread_should_stop());
	atomic_set(&blkif->drain, 0);
}

/*
 * Completion callback on the bio's. Called as bh->b_end_io()
 */
static void __end_block_io_op(struct pending_req *pending_req, int error)
{
	/* An error fails the entire request. */
	if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
	    (error == -EOPNOTSUPP)) {
		pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
		   (error == -EOPNOTSUPP)) {
		pr_debug(DRV_PFX "write barrier op failed, not supported\n");
		xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
		pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
			 " error=%d\n", error);
		pending_req->status = BLKIF_RSP_ERROR;
	}

	/*
	 * If all of the bio's have completed it is time to unmap
	 * the grant references associated with 'request' and provide
	 * the proper response on the ring.
	 */
	if (atomic_dec_and_test(&pending_req->pendcnt)) {
		xen_blkbk_unmap(pending_req->blkif,
				pending_req->segments,
				pending_req->nr_pages);
		make_response(pending_req->blkif, pending_req->id,
			      pending_req->operation, pending_req->status);
		xen_blkif_put(pending_req->blkif);
		if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
			if (atomic_read(&pending_req->blkif->drain))
				complete(&pending_req->blkif->drain_complete);
		}
		free_req(pending_req->blkif, pending_req);
	}
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio, int error)
{
	__end_block_io_op(bio->bi_private, error);
	bio_put(bio);
}

/*
 * Function to copy the 'struct blkif_request' from the ring buffer
 * (which has the sectors we want, number of them, grant references, etc),
 * and transmute it to the block API to hand it over to the proper block disk.
 */
static int
__do_block_io_op(struct xen_blkif *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	struct blkif_request req;
	struct pending_req *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

	if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
		rc = blk_rings->common.rsp_prod_pvt;
		pr_warn(DRV_PFX "Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
			rp, rc, rp - rc, blkif->vbd.pdevice);
		return -EACCES;
	}

	while (rc != rp) {

		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;

		if (kthread_should_stop()) {
			more_to_do = 1;
			break;
		}

		pending_req = alloc_req(blkif);
		if (NULL == pending_req) {
			blkif->st_oo_req++;
			more_to_do = 1;
			break;
		}

		switch (blkif->blk_protocol) {
		case BLKIF_PROTOCOL_NATIVE:
			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
			break;
		case BLKIF_PROTOCOL_X86_32:
			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
			break;
		case BLKIF_PROTOCOL_X86_64:
			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
			break;
		default:
			BUG();
		}
		blk_rings->common.req_cons = ++rc; /* before make_response() */

		/* Apply all sanity checks to /private copy/ of request. */
		barrier();

		switch (req.operation) {
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
		case BLKIF_OP_WRITE_BARRIER:
		case BLKIF_OP_FLUSH_DISKCACHE:
		case BLKIF_OP_INDIRECT:
			if (dispatch_rw_block_io(blkif, &req, pending_req))
				goto done;
			break;
		case BLKIF_OP_DISCARD:
			free_req(blkif, pending_req);
			if (dispatch_discard_io(blkif, &req))
				goto done;
			break;
		default:
			if (dispatch_other_io(blkif, &req, pending_req))
				goto done;
			break;
		}

		/* Yield point for this unbounded loop. */
		cond_resched();
	}
done:
	return more_to_do;
}

static int
do_block_io_op(struct xen_blkif *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int more_to_do;

	do {
		more_to_do = __do_block_io_op(blkif);
		if (more_to_do)
			break;

		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
	} while (more_to_do);

	return more_to_do;
}

/*
 * Transmutation of the 'struct blkif_request' to a proper 'struct bio'
 * and call the 'submit_bio' to pass it to the underlying storage.
 */
static int dispatch_rw_block_io(struct xen_blkif *blkif,
				struct blkif_request *req,
				struct pending_req *pending_req)
{
	struct phys_req preq;
	struct seg_buf *seg = pending_req->seg;
	unsigned int nseg;
	struct bio *bio = NULL;
	struct bio **biolist = pending_req->biolist;
	int i, nbio = 0;
	int operation;
	struct blk_plug plug;
	bool drain = false;
	struct grant_page **pages = pending_req->segments;
	unsigned short req_operation;

	req_operation = req->operation == BLKIF_OP_INDIRECT ?
			req->u.indirect.indirect_op : req->operation;
	if ((req->operation == BLKIF_OP_INDIRECT) &&
	    (req_operation != BLKIF_OP_READ) &&
	    (req_operation != BLKIF_OP_WRITE)) {
		pr_debug(DRV_PFX "Invalid indirect operation (%u)\n",
			 req_operation);
		goto fail_response;
	}

	switch (req_operation) {
	case BLKIF_OP_READ:
		blkif->st_rd_req++;
		operation = READ;
		break;
	case BLKIF_OP_WRITE:
		blkif->st_wr_req++;
		operation = WRITE_ODIRECT;
		break;
	case BLKIF_OP_WRITE_BARRIER:
		drain = true;
	case BLKIF_OP_FLUSH_DISKCACHE:
		blkif->st_f_req++;
		operation = WRITE_FLUSH;
		break;
	default:
		operation = 0; /* make gcc happy */
		goto fail_response;
		break;
	}

	/* Check that the number of segments is sane. */
	nseg = req->operation == BLKIF_OP_INDIRECT ?
	       req->u.indirect.nr_segments : req->u.rw.nr_segments;

	if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
	    unlikely((req->operation != BLKIF_OP_INDIRECT) &&
		     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
	    unlikely((req->operation == BLKIF_OP_INDIRECT) &&
		     (nseg > MAX_INDIRECT_SEGMENTS))) {
		pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
			 nseg);
		/* Haven't submitted any bio's yet. */
		goto fail_response;
	}

	preq.nr_sects      = 0;

	pending_req->blkif     = blkif;
	pending_req->id        = req->u.rw.id;
	pending_req->operation = req_operation;
	pending_req->status    = BLKIF_RSP_OKAY;
	pending_req->nr_pages  = nseg;

	if (req->operation != BLKIF_OP_INDIRECT) {
		preq.dev               = req->u.rw.handle;
		preq.sector_number     = req->u.rw.sector_number;
		for (i = 0; i < nseg; i++) {
			pages[i]->gref = req->u.rw.seg[i].gref;
			seg[i].nsec = req->u.rw.seg[i].last_sect -
				req->u.rw.seg[i].first_sect + 1;
			seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
			if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
			    (req->u.rw.seg[i].last_sect <
			     req->u.rw.seg[i].first_sect))
				goto fail_response;
			preq.nr_sects += seg[i].nsec;
		}
	} else {
		preq.dev               = req->u.indirect.handle;
		preq.sector_number     = req->u.indirect.sector_number;
		if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
			goto fail_response;
	}

	if (xen_vbd_translate(&preq, blkif, operation) != 0) {
		pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
			 operation == READ ? "read" : "write",
			 preq.sector_number,
			 preq.sector_number + preq.nr_sects,
			 blkif->vbd.pdevice);
		goto fail_response;
	}

	/*
	 * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
	 * is set there.
	 */
	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number|(int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			pr_debug(DRV_PFX "Misaligned I/O request from domain %d",
				 blkif->domid);
			goto fail_response;
		}
	}

	/* Wait on all outstanding I/O's and once that has been completed
	 * issue the WRITE_FLUSH.
	 */
	if (drain)
		xen_blk_drain_io(pending_req->blkif);

	/*
	 * If we have failed at this point, we need to undo the M2P override,
	 * set gnttab_set_unmap_op on all of the grant references and perform
	 * the hypercall to unmap the grants - that is all done in
	 * xen_blkbk_unmap.
	 */
	if (xen_blkbk_map_seg(pending_req))
		goto fail_flush;

	/*
	 * This corresponding xen_blkif_put is done in __end_block_io_op, or
	 * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
	 */
	xen_blkif_get(blkif);

	for (i = 0; i < nseg; i++) {
		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     pages[i]->page,
				     seg[i].nsec << 9,
				     seg[i].offset) == 0)) {

			int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
			bio = bio_alloc(GFP_KERNEL, nr_iovecs);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

			biolist[nbio++] = bio;
			bio->bi_bdev    = preq.bdev;
			bio->bi_private = pending_req;
			bio->bi_end_io  = end_block_io_op;
			bio->bi_sector  = preq.sector_number;
		}

		preq.sector_number += seg[i].nsec;
	}

	/* This will be hit if the operation was a flush or discard. */
	if (!bio) {
		BUG_ON(operation != WRITE_FLUSH);

		bio = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

		biolist[nbio++] = bio;
		bio->bi_bdev    = preq.bdev;
		bio->bi_private = pending_req;
		bio->bi_end_io  = end_block_io_op;
	}

	atomic_set(&pending_req->pendcnt, nbio);
	blk_start_plug(&plug);

	for (i = 0; i < nbio; i++)
		submit_bio(operation, biolist[i]);

	/* Let the I/Os go.. */
	blk_finish_plug(&plug);

	if (operation == READ)
		blkif->st_rd_sect += preq.nr_sects;
	else if (operation & WRITE)
		blkif->st_wr_sect += preq.nr_sects;

	return 0;

 fail_flush:
	xen_blkbk_unmap(blkif, pending_req->segments,
			pending_req->nr_pages);
 fail_response:
	/* Haven't submitted any bio's yet. */
	make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
	free_req(blkif, pending_req);
	msleep(1); /* back off a bit */
	return -EIO;

 fail_put_bio:
	for (i = 0; i < nbio; i++)
		bio_put(biolist[i]);
	atomic_set(&pending_req->pendcnt, 1);
	__end_block_io_op(pending_req, -EINVAL);
	msleep(1); /* back off a bit */
	return -EIO;
}

/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct xen_blkif *blkif, u64 id,
			  unsigned short op, int st)
{
	struct blkif_response  resp;
	unsigned long     flags;
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int notify;

	resp.id        = id;
	resp.operation = op;
	resp.status    = st;

	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
	/* Place on the response ring for the relevant domain. */
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_32:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_64:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	default:
		BUG();
	}
	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
	if (notify)
		notify_remote_via_irq(blkif->irq);
}

static int __init xen_blkif_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	rc = xen_blkif_interface_init();
	if (rc)
		goto failed_init;

	rc = xen_blkif_xenbus_init();
	if (rc)
		goto failed_init;

 failed_init:
	return rc;
}

module_init(xen_blkif_init);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");