dm-io.c

/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

struct dm_io_client {
        mempool_t *pool;
        struct bio_set *bios;
};

/* FIXME: can we shrink this ? */
struct io {
        unsigned long error_bits;
        unsigned long eopnotsupp_bits;
        atomic_t count;
        struct task_struct *sleeper;
        struct dm_io_client *client;
        io_notify_fn callback;
        void *context;
};

static struct kmem_cache *_dm_io_cache;

/*
 * io contexts are only dynamically allocated for asynchronous
 * io. Since async io is likely to be the majority of io we'll
 * have the same number of io contexts as bios! (FIXME: must reduce this).
 */
static unsigned int pages_to_ios(unsigned int pages)
{
        return 4 * pages; /* too many ? */
}

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(unsigned num_pages)
{
        unsigned ios = pages_to_ios(num_pages);
        struct dm_io_client *client;

        client = kmalloc(sizeof(*client), GFP_KERNEL);
        if (!client)
                return ERR_PTR(-ENOMEM);

        client->pool = mempool_create_slab_pool(ios, _dm_io_cache);
        if (!client->pool)
                goto bad;

        client->bios = bioset_create(16, 0);
        if (!client->bios)
                goto bad;

        return client;

 bad:
        if (client->pool)
                mempool_destroy(client->pool);
        kfree(client);
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
{
        return mempool_resize(client->pool, pages_to_ios(num_pages),
                              GFP_KERNEL);
}
EXPORT_SYMBOL(dm_io_client_resize);

void dm_io_client_destroy(struct dm_io_client *client)
{
        mempool_destroy(client->pool);
        bioset_free(client->bios);
        kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
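
/*
 * Usage sketch (not part of the original file): a hypothetical caller
 * creating, resizing and destroying a client.  The helper name
 * "example_setup_client" is illustrative only.
 */
#if 0
static struct dm_io_client *example_setup_client(void)
{
        struct dm_io_client *client;

        client = dm_io_client_create(1);        /* 1 page -> 4 io structs */
        if (IS_ERR(client))
                return client;                  /* PTR_ERR() gives -ENOMEM */

        /* Grow the mempool if more concurrent io is expected later. */
        if (dm_io_client_resize(2, client)) {
                dm_io_client_destroy(client);
                return ERR_PTR(-ENOMEM);
        }

        return client;
}
#endif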

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * In order to save a memory allocation we store this in the last
 * bvec which we know is unused (blech).
 * XXX This is ugly and can OOPS with some configs... find another way.
 *---------------------------------------------------------------*/
static inline void bio_set_region(struct bio *bio, unsigned region)
{
        bio->bi_io_vec[bio->bi_max_vecs].bv_len = region;
}

static inline unsigned bio_get_region(struct bio *bio)
{
        return bio->bi_io_vec[bio->bi_max_vecs].bv_len;
}
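
/*
 * Illustration (not part of the original file): do_region() below
 * decrements bi_max_vecs by one, so the vector at index bi_max_vecs is
 * invisible to bio_add_page() and its bv_len field is free to carry the
 * region number:
 *
 *      bio->bi_io_vec: [ bvec 0 | bvec 1 | ... | hidden bvec ]
 *                                                ^ bv_len = region
 *
 * endio() restores bi_max_vecs before bio_put() so the bio is freed with
 * its true vector count.
 */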

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
        if (error) {
                set_bit(region, &io->error_bits);
                if (error == -EOPNOTSUPP)
                        set_bit(region, &io->eopnotsupp_bits);
        }

        if (atomic_dec_and_test(&io->count)) {
                if (io->sleeper)
                        wake_up_process(io->sleeper);
                else {
                        unsigned long r = io->error_bits;
                        io_notify_fn fn = io->callback;
                        void *context = io->context;

                        mempool_free(io, io->client->pool);
                        fn(r, context);
                }
        }
}

static void endio(struct bio *bio, int error)
{
        struct io *io;
        unsigned region;

        if (error && bio_data_dir(bio) == READ)
                zero_fill_bio(bio);

        /*
         * The bio destructor in bio_put() may use the io object.
         */
        io = bio->bi_private;
        region = bio_get_region(bio);

        bio->bi_max_vecs++;
        bio_put(bio);

        dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
        void (*get_page)(struct dpages *dp,
                         struct page **p, unsigned long *len, unsigned *offset);
        void (*next_page)(struct dpages *dp);

        unsigned context_u;
        void *context_ptr;
};
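
/*
 * Sketch (not part of the original file): how a consumer walks a dpages
 * object.  This mirrors the inner loop of do_region() further down; the
 * helper name "example_walk_pages" is illustrative only.
 */
#if 0
static void example_walk_pages(struct dpages *dp, sector_t remaining)
{
        struct page *page;
        unsigned long len;
        unsigned offset;

        while (remaining) {
                /* Ask the current source for a page fragment... */
                dp->get_page(dp, &page, &len, &offset);
                len = min(len, to_bytes(remaining));
                /* ...consume up to 'len' bytes at 'offset'... */
                remaining -= to_sector(len);
                /* ...then advance to the next page fragment. */
                dp->next_page(dp);
        }
}
#endif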

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
                          struct page **p, unsigned long *len, unsigned *offset)
{
        unsigned o = dp->context_u;
        struct page_list *pl = (struct page_list *) dp->context_ptr;

        *p = pl->page;
        *len = PAGE_SIZE - o;
        *offset = o;
}

static void list_next_page(struct dpages *dp)
{
        struct page_list *pl = (struct page_list *) dp->context_ptr;

        dp->context_ptr = pl->next;
        dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
        dp->get_page = list_get_page;
        dp->next_page = list_next_page;
        dp->context_u = offset;
        dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
                          struct page **p, unsigned long *len, unsigned *offset)
{
        struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;

        *p = bvec->bv_page;
        *len = bvec->bv_len;
        *offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
        struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;

        dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
        dp->get_page = bvec_get_page;
        dp->next_page = bvec_next_page;
        dp->context_ptr = bvec;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
                        struct page **p, unsigned long *len, unsigned *offset)
{
        *p = vmalloc_to_page(dp->context_ptr);
        *offset = dp->context_u;
        *len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
        dp->context_ptr += PAGE_SIZE - dp->context_u;
        dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
        dp->get_page = vm_get_page;
        dp->next_page = vm_next_page;
        dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
        dp->context_ptr = data;
}

static void dm_bio_destructor(struct bio *bio)
{
        struct io *io = bio->bi_private;

        bio_free(bio, io->client->bios);
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
                        unsigned *offset)
{
        *p = virt_to_page(dp->context_ptr);
        *offset = dp->context_u;
        *len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
        dp->context_ptr += PAGE_SIZE - dp->context_u;
        dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
        dp->get_page = km_get_page;
        dp->next_page = km_next_page;
        dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
        dp->context_ptr = data;
}
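
/*
 * Note (not part of the original file): the VMA and KMEM variants above
 * differ only in how a virtual address is turned into a struct page.
 * vm_get_page() uses vmalloc_to_page() and so handles vmalloc()ed buffers,
 * whose pages need not be physically contiguous; km_get_page() uses
 * virt_to_page() and is only valid for directly-mapped kernel memory such
 * as kmalloc()ed buffers.
 */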

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
                      struct dpages *dp, struct io *io)
{
        struct bio *bio;
        struct page *page;
        unsigned long len;
        unsigned offset;
        unsigned num_bvecs;
        sector_t remaining = where->count;

        while (remaining) {
                /*
                 * Allocate a suitably-sized bio: we add an extra
                 * bvec for bio_get/set_region() and decrement bi_max_vecs
                 * to hide it from bio_add_page().
                 */
                num_bvecs = dm_sector_div_up(remaining,
                                             (PAGE_SIZE >> SECTOR_SHIFT));
                num_bvecs = 1 + min_t(int, bio_get_nr_vecs(where->bdev),
                                      num_bvecs);
                if (unlikely(num_bvecs > BIO_MAX_PAGES))
                        num_bvecs = BIO_MAX_PAGES;
                bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
                bio->bi_sector = where->sector + (where->count - remaining);
                bio->bi_bdev = where->bdev;
                bio->bi_end_io = endio;
                bio->bi_private = io;
                bio->bi_destructor = dm_bio_destructor;
                bio->bi_max_vecs--;
                bio_set_region(bio, region);

                /*
                 * Try and add as many pages as possible.
                 */
                while (remaining) {
                        dp->get_page(dp, &page, &len, &offset);
                        len = min(len, to_bytes(remaining));
                        if (!bio_add_page(bio, page, len, offset))
                                break;

                        offset = 0;
                        remaining -= to_sector(len);
                        dp->next_page(dp);
                }

                atomic_inc(&io->count);
                submit_bio(rw, bio);
        }
}

static void dispatch_io(int rw, unsigned int num_regions,
                        struct dm_io_region *where, struct dpages *dp,
                        struct io *io, int sync)
{
        int i;
        struct dpages old_pages = *dp;

        if (sync)
                rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);

        /*
         * For multiple regions we need to be careful to rewind
         * the dp object for each call to do_region.
         */
        for (i = 0; i < num_regions; i++) {
                *dp = old_pages;
                if (where[i].count)
                        do_region(rw, i, where + i, dp, io);
        }

        /*
         * Drop the extra reference that we were holding to avoid
         * the io being completed too early.
         */
        dec_count(io, 0, 0);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
                   struct dm_io_region *where, int rw, struct dpages *dp,
                   unsigned long *error_bits)
{
        struct io io;

        if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
                WARN_ON(1);
                return -EIO;
        }

retry:
        io.error_bits = 0;
        io.eopnotsupp_bits = 0;
        atomic_set(&io.count, 1); /* see dispatch_io() */
        io.sleeper = current;
        io.client = client;

        dispatch_io(rw, num_regions, where, dp, &io, 1);

        while (1) {
                set_current_state(TASK_UNINTERRUPTIBLE);

                if (!atomic_read(&io.count))
                        break;

                io_schedule();
        }
        set_current_state(TASK_RUNNING);

        if (io.eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
                rw &= ~(1 << BIO_RW_BARRIER);
                goto retry;
        }

        if (error_bits)
                *error_bits = io.error_bits;

        return io.error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
                    struct dm_io_region *where, int rw, struct dpages *dp,
                    io_notify_fn fn, void *context)
{
        struct io *io;

        if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
                WARN_ON(1);
                fn(1, context);
                return -EIO;
        }

        io = mempool_alloc(client->pool, GFP_NOIO);
        io->error_bits = 0;
        io->eopnotsupp_bits = 0;
        atomic_set(&io->count, 1); /* see dispatch_io() */
        io->sleeper = NULL;
        io->client = client;
        io->callback = fn;
        io->context = context;

        dispatch_io(rw, num_regions, where, dp, io, 0);
        return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
{
        /* Set up dpages based on memory type */
        switch (io_req->mem.type) {
        case DM_IO_PAGE_LIST:
                list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
                break;

        case DM_IO_BVEC:
                bvec_dp_init(dp, io_req->mem.ptr.bvec);
                break;

        case DM_IO_VMA:
                vm_dp_init(dp, io_req->mem.ptr.vma);
                break;

        case DM_IO_KMEM:
                km_dp_init(dp, io_req->mem.ptr.addr);
                break;

        default:
                return -EINVAL;
        }

        return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set the BIO_RW_SYNCIO bit
 * in io_req->bi_rw. If you fail to do one of these, the IO will be submitted
 * to the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
          struct dm_io_region *where, unsigned long *sync_error_bits)
{
        int r;
        struct dpages dp;

        r = dp_init(io_req, &dp);
        if (r)
                return r;

        if (!io_req->notify.fn)
                return sync_io(io_req->client, num_regions, where,
                               io_req->bi_rw, &dp, sync_error_bits);

        return async_io(io_req->client, num_regions, where, io_req->bi_rw,
                        &dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
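
/*
 * Usage sketch (not part of the original file): a hypothetical synchronous
 * read of one region into a vmalloc()ed buffer.  A NULL notify.fn selects
 * the synchronous path.  The identifiers "example_read_region", "bdev" and
 * "buf" are illustrative only.
 */
#if 0
static int example_read_region(struct dm_io_client *client,
                               struct block_device *bdev, void *buf)
{
        unsigned long error_bits;
        struct dm_io_region where = {
                .bdev   = bdev,
                .sector = 0,
                .count  = 8,            /* 8 sectors = 4 KiB */
        };
        struct dm_io_request io_req = {
                .bi_rw          = READ,
                .mem.type       = DM_IO_VMA,
                .mem.ptr.vma    = buf,
                .notify.fn      = NULL, /* NULL => dm_io() blocks */
                .client         = client,
        };

        return dm_io(&io_req, 1, &where, &error_bits);
}
#endif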

int __init dm_io_init(void)
{
        _dm_io_cache = KMEM_CACHE(io, 0);
        if (!_dm_io_cache)
                return -ENOMEM;

        return 0;
}

void dm_io_exit(void)
{
        kmem_cache_destroy(_dm_io_cache);
        _dm_io_cache = NULL;
}