dm-io.c

/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS       BITS_PER_LONG
#define MIN_IOS         16
#define MIN_BIOS        16

struct dm_io_client {
        mempool_t *pool;
        struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
        unsigned long error_bits;
        atomic_t count;
        struct task_struct *sleeper;
        struct dm_io_client *client;
        io_notify_fn callback;
        void *context;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
        struct dm_io_client *client;

        client = kmalloc(sizeof(*client), GFP_KERNEL);
        if (!client)
                return ERR_PTR(-ENOMEM);

        client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache);
        if (!client->pool)
                goto bad;

        client->bios = bioset_create(MIN_BIOS, 0);
        if (!client->bios)
                goto bad;

        return client;

   bad:
        if (client->pool)
                mempool_destroy(client->pool);
        kfree(client);
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
        mempool_destroy(client->pool);
        bioset_free(client->bios);
        kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
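
/*
 * Illustrative sketch (not part of this file): a typical consumer, such
 * as a device-mapper target, creates one client in its constructor and
 * destroys it in its destructor.  The 'mt'/'io_client' names below are
 * hypothetical:
 *
 *      mt->io_client = dm_io_client_create();
 *      if (IS_ERR(mt->io_client))
 *              return PTR_ERR(mt->io_client);
 *      ...
 *      dm_io_client_destroy(mt->io_client);
 */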

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
                                       unsigned region)
{
        if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
                DMCRIT("Unaligned struct io pointer %p", io);
                BUG();
        }

        bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
                                            unsigned *region)
{
        unsigned long val = (unsigned long)bio->bi_private;

        *io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
        *region = val & (DM_IO_MAX_REGIONS - 1);
}
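
/*
 * Worked example of the encoding above (illustrative, assuming a 64-bit
 * machine, so DM_IO_MAX_REGIONS == 64 and an aligned 'struct io' pointer
 * has its low six bits clear):
 *
 *      io         = 0x...f40   (low bits 000000)
 *      region     = 5          (binary 000101)
 *      bi_private = 0x...f45   (io | region)
 *
 * Decoding recovers both halves with masks: val & -64UL clears the low
 * six bits to give back the pointer, and val & 63 keeps only those bits
 * to give back the region number.
 */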

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
        if (error)
                set_bit(region, &io->error_bits);

        if (atomic_dec_and_test(&io->count)) {
                if (io->sleeper)
                        wake_up_process(io->sleeper);

                else {
                        unsigned long r = io->error_bits;
                        io_notify_fn fn = io->callback;
                        void *context = io->context;

                        mempool_free(io, io->client->pool);
                        fn(r, context);
                }
        }
}

static void endio(struct bio *bio, int error)
{
        struct io *io;
        unsigned region;

        if (error && bio_data_dir(bio) == READ)
                zero_fill_bio(bio);

        /*
         * The bio destructor in bio_put() may use the io object.
         */
        retrieve_io_and_region_from_bio(bio, &io, &region);

        bio_put(bio);

        dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
        void (*get_page)(struct dpages *dp,
                         struct page **p, unsigned long *len, unsigned *offset);
        void (*next_page)(struct dpages *dp);

        unsigned context_u;
        void *context_ptr;
};
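
/*
 * A minimal sketch of the iterator contract (illustrative only): callers
 * ask for the current page fragment with get_page(), consume some or all
 * of it, and call next_page() once the fragment is exhausted:
 *
 *      dp->get_page(dp, &page, &len, &offset);
 *      (use up to 'len' bytes of 'page' starting at 'offset')
 *      dp->next_page(dp);
 *
 * All iteration state lives in the dpages struct itself, which is why
 * dispatch_io() below can rewind simply by taking a struct copy.
 */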

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
                          struct page **p, unsigned long *len, unsigned *offset)
{
        unsigned o = dp->context_u;
        struct page_list *pl = (struct page_list *) dp->context_ptr;

        *p = pl->page;
        *len = PAGE_SIZE - o;
        *offset = o;
}

static void list_next_page(struct dpages *dp)
{
        struct page_list *pl = (struct page_list *) dp->context_ptr;
        dp->context_ptr = pl->next;
        dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
        dp->get_page = list_get_page;
        dp->next_page = list_next_page;
        dp->context_u = offset;
        dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
                          struct page **p, unsigned long *len, unsigned *offset)
{
        struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
        *p = bvec->bv_page;
        *len = bvec->bv_len;
        *offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
        struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
        dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
        dp->get_page = bvec_get_page;
        dp->next_page = bvec_next_page;
        dp->context_ptr = bvec;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
                        struct page **p, unsigned long *len, unsigned *offset)
{
        *p = vmalloc_to_page(dp->context_ptr);
        *offset = dp->context_u;
        *len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
        dp->context_ptr += PAGE_SIZE - dp->context_u;
        dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
        dp->get_page = vm_get_page;
        dp->next_page = vm_next_page;
        dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
        dp->context_ptr = data;
}

static void dm_bio_destructor(struct bio *bio)
{
        unsigned region;
        struct io *io;

        retrieve_io_and_region_from_bio(bio, &io, &region);

        bio_free(bio, io->client->bios);
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
                        unsigned *offset)
{
        *p = virt_to_page(dp->context_ptr);
        *offset = dp->context_u;
        *len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
        dp->context_ptr += PAGE_SIZE - dp->context_u;
        dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
        dp->get_page = km_get_page;
        dp->next_page = km_next_page;
        dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
        dp->context_ptr = data;
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
                      struct dpages *dp, struct io *io)
{
        struct bio *bio;
        struct page *page;
        unsigned long len;
        unsigned offset;
        unsigned num_bvecs;
        sector_t remaining = where->count;

        /*
         * where->count may be zero if rw holds a flush and we need to
         * send a zero-sized flush.
         */
        do {
                /*
                 * Allocate a suitably-sized bio.
                 */
                num_bvecs = dm_sector_div_up(remaining,
                                             (PAGE_SIZE >> SECTOR_SHIFT));
                num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), num_bvecs);
                bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
                bio->bi_sector = where->sector + (where->count - remaining);
                bio->bi_bdev = where->bdev;
                bio->bi_end_io = endio;
                bio->bi_destructor = dm_bio_destructor;
                store_io_and_region_in_bio(bio, io, region);

                /*
                 * Try and add as many pages as possible.
                 */
                while (remaining) {
                        dp->get_page(dp, &page, &len, &offset);
                        len = min(len, to_bytes(remaining));
                        if (!bio_add_page(bio, page, len, offset))
                                break;

                        offset = 0;
                        remaining -= to_sector(len);
                        dp->next_page(dp);
                }

                atomic_inc(&io->count);
                submit_bio(rw, bio);
        } while (remaining);
}

static void dispatch_io(int rw, unsigned int num_regions,
                        struct dm_io_region *where, struct dpages *dp,
                        struct io *io, int sync)
{
        int i;
        struct dpages old_pages = *dp;

        BUG_ON(num_regions > DM_IO_MAX_REGIONS);

        if (sync)
                rw |= REQ_SYNC;

        /*
         * For multiple regions we need to be careful to rewind
         * the dp object for each call to do_region.
         */
        for (i = 0; i < num_regions; i++) {
                *dp = old_pages;
                if (where[i].count || (rw & REQ_FLUSH))
                        do_region(rw, i, where + i, dp, io);
        }

        /*
         * Drop the extra reference that we were holding to avoid
         * the io being completed too early.
         */
        dec_count(io, 0, 0);
}
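
/*
 * Illustrative summary of the reference counting above: the caller seeds
 * io->count with 1 before dispatch_io() runs, do_region() takes one
 * extra reference per submitted bio, and each endio drops one.  The
 * final dec_count(io, 0, 0) releases the seed reference, so the io can
 * only complete once all regions have actually been dispatched:
 *
 *      atomic_set(&io->count, 1);      (seed, set by sync_io/async_io)
 *      atomic_inc(&io->count);         (per bio, in do_region)
 *      dec_count(io, region, error);   (per bio, in endio)
 *      dec_count(io, 0, 0);            (drops the seed, here)
 */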

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
                   struct dm_io_region *where, int rw, struct dpages *dp,
                   unsigned long *error_bits)
{
        /*
         * gcc <= 4.3 can't do the alignment for stack variables, so we must
         * align it on our own.
         * volatile prevents the optimizer from removing or reusing
         * "io_" field from the stack frame (allowed in ANSI C).
         */
        volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
        struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));

        if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
                WARN_ON(1);
                return -EIO;
        }

        io->error_bits = 0;
        atomic_set(&io->count, 1); /* see dispatch_io() */
        io->sleeper = current;
        io->client = client;

        dispatch_io(rw, num_regions, where, dp, io, 1);

        while (1) {
                set_current_state(TASK_UNINTERRUPTIBLE);

                if (!atomic_read(&io->count))
                        break;

                io_schedule();
        }
        set_current_state(TASK_RUNNING);

        if (error_bits)
                *error_bits = io->error_bits;

        return io->error_bits ? -EIO : 0;
}
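
/*
 * Worked example of the manual stack alignment above (illustrative,
 * assuming __alignof__(struct io) == 64): the buffer is oversized by
 * alignment - 1 bytes, so rounding its address up always yields an
 * aligned pointer that still lies within the buffer:
 *
 *      &io_                = 0x...c28  (arbitrary stack address)
 *      PTR_ALIGN(&io_, 64) = 0x...c40  (next multiple of 64)
 */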

static int async_io(struct dm_io_client *client, unsigned int num_regions,
                    struct dm_io_region *where, int rw, struct dpages *dp,
                    io_notify_fn fn, void *context)
{
        struct io *io;

        if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
                WARN_ON(1);
                fn(1, context);
                return -EIO;
        }

        io = mempool_alloc(client->pool, GFP_NOIO);
        io->error_bits = 0;
        atomic_set(&io->count, 1); /* see dispatch_io() */
        io->sleeper = NULL;
        io->client = client;
        io->callback = fn;
        io->context = context;

        dispatch_io(rw, num_regions, where, dp, io, 0);
        return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
{
        /* Set up dpages based on memory type */
        switch (io_req->mem.type) {
        case DM_IO_PAGE_LIST:
                list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
                break;

        case DM_IO_BVEC:
                bvec_dp_init(dp, io_req->mem.ptr.bvec);
                break;

        case DM_IO_VMA:
                vm_dp_init(dp, io_req->mem.ptr.vma);
                break;

        case DM_IO_KMEM:
                km_dp_init(dp, io_req->mem.ptr.addr);
                break;

        default:
                return -EINVAL;
        }

        return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in
 * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
          struct dm_io_region *where, unsigned long *sync_error_bits)
{
        int r;
        struct dpages dp;

        r = dp_init(io_req, &dp);
        if (r)
                return r;

        if (!io_req->notify.fn)
                return sync_io(io_req->client, num_regions, where,
                               io_req->bi_rw, &dp, sync_error_bits);

        return async_io(io_req->client, num_regions, where, io_req->bi_rw,
                        &dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
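
/*
 * A hedged usage sketch (not part of this file): a synchronous 4 KB read
 * through this interface, assuming 'client' was obtained from
 * dm_io_client_create() and 'data' is a kmalloc'ed page-sized buffer
 * ('bdev' is the target block device; all three names are hypothetical):
 *
 *      struct dm_io_region where = {
 *              .bdev   = bdev,
 *              .sector = 0,
 *              .count  = 8,            (8 sectors == 4 KB)
 *      };
 *      struct dm_io_request req = {
 *              .bi_rw        = READ,
 *              .mem.type     = DM_IO_KMEM,
 *              .mem.ptr.addr = data,
 *              .notify.fn    = NULL,   (NULL makes dm_io() synchronous)
 *              .client       = client,
 *      };
 *      unsigned long error_bits;
 *      int r = dm_io(&req, 1, &where, &error_bits);
 *
 * Setting notify.fn instead makes dm_io() return immediately and invoke
 * the callback from the endio path when all regions complete.
 */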

int __init dm_io_init(void)
{
        _dm_io_cache = KMEM_CACHE(io, 0);
        if (!_dm_io_cache)
                return -ENOMEM;

        return 0;
}

void dm_io_exit(void)
{
        kmem_cache_destroy(_dm_io_cache);
        _dm_io_cache = NULL;
}