dm-io.c

/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-io.h"

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

static struct bio_set *_bios;

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/* FIXME: can we shrink this ? */
struct io {
	unsigned long error;
	atomic_t count;
	struct task_struct *sleeper;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
};

/*
 * io contexts are only dynamically allocated for asynchronous
 * io.  Since async io is likely to be the majority of io we'll
 * have the same number of io contexts as bios! (FIXME: must reduce this).
 */
static unsigned _num_ios;
static mempool_t *_io_pool;

/*
 * Temporary functions to allow old and new interfaces to co-exist.
 */
static struct bio_set *bios(struct dm_io_client *client)
{
	return client ? client->bios : _bios;
}

static mempool_t *io_pool(struct dm_io_client *client)
{
	return client ? client->pool : _io_pool;
}

static unsigned int pages_to_ios(unsigned int pages)
{
	return 4 * pages;	/* too many ? */
}

static int resize_pool(unsigned int new_ios)
{
	int r = 0;

	if (_io_pool) {
		if (new_ios == 0) {
			/* free off the pool */
			mempool_destroy(_io_pool);
			_io_pool = NULL;
			bioset_free(_bios);

		} else {
			/* resize the pool */
			r = mempool_resize(_io_pool, new_ios, GFP_KERNEL);
		}

	} else {
		/* create new pool */
		_io_pool = mempool_create_kmalloc_pool(new_ios,
						       sizeof(struct io));
		if (!_io_pool)
			return -ENOMEM;

		_bios = bioset_create(16, 16);
		if (!_bios) {
			mempool_destroy(_io_pool);
			_io_pool = NULL;
			return -ENOMEM;
		}
	}

	if (!r)
		_num_ios = new_ios;

	return r;
}

int dm_io_get(unsigned int num_pages)
{
	return resize_pool(_num_ios + pages_to_ios(num_pages));
}

void dm_io_put(unsigned int num_pages)
{
	resize_pool(_num_ios - pages_to_ios(num_pages));
}
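
/*
 * Illustrative sketch, not part of the original file: a typical
 * old-interface client reserves pool capacity up front and releases
 * it again at teardown.  The function name and the figure of 64
 * pages are hypothetical.
 */
#if 0
static int example_client_init(void)
{
	int r;

	r = dm_io_get(64);	/* grows _io_pool by pages_to_ios(64) ios */
	if (r)
		return r;	/* -ENOMEM if the pool could not grow */

	/* ... issue dm_io_sync()/dm_io_async() calls ... */

	dm_io_put(64);		/* shrink the pool back down */
	return 0;
}
#endif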
/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * In order to save a memory allocation we store this in the last
 * bvec, which we know is unused (blech).
 * XXX This is ugly and can OOPS with some configs... find another way.
 *---------------------------------------------------------------*/
static inline void bio_set_region(struct bio *bio, unsigned region)
{
	bio->bi_io_vec[bio->bi_max_vecs].bv_len = region;
}

static inline unsigned bio_get_region(struct bio *bio)
{
	return bio->bi_io_vec[bio->bi_max_vecs].bv_len;
}
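
/*
 * Note on the trick above: do_region() below allocates one bvec more
 * than it lets bio_add_page() use (it decrements bi_max_vecs right
 * after allocation), so bi_io_vec[bi_max_vecs] is a hidden spare slot
 * whose bv_len field carries the region number.  endio() undoes the
 * decrement before bio_put() hands the bio back to its bioset.
 */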
/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error);

	if (atomic_dec_and_test(&io->count)) {
		if (io->sleeper)
			wake_up_process(io->sleeper);

		else {
			int r = io->error;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io_pool(io->client));
			fn(r, context);
		}
	}
}

static int endio(struct bio *bio, unsigned int done, int error)
{
	struct io *io;
	unsigned region;

	/* keep going until we've finished */
	if (bio->bi_size)
		return 1;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	io = bio->bi_private;
	region = bio_get_region(bio);

	bio->bi_max_vecs++;
	bio_put(bio);

	dec_count(io, region, error);

	return 0;
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;
};

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}

/*
 * Functions for getting the pages from a vmalloc'ed buffer.
 */
static void vm_get_page(struct dpages *dp,
			struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
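
/*
 * Illustrative sketch, not part of the original file: the protocol a
 * consumer follows when draining any of the three dpages variants
 * above.  do_region() below is the real consumer; this hypothetical
 * walker just shows the get_page()/next_page() contract ("total" is
 * in bytes here).
 */
#if 0
static void example_walk_dpages(struct dpages *dp, unsigned long total)
{
	struct page *page;
	unsigned long len;
	unsigned offset;

	while (total) {
		dp->get_page(dp, &page, &len, &offset);
		len = min(len, total);
		/* ... consume len bytes at (page, offset) ... */
		total -= len;
		dp->next_page(dp);
	}
}
#endif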
static void dm_bio_destructor(struct bio *bio)
{
	struct io *io = bio->bi_private;

	bio_free(bio, bios(io->client));
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned int region, struct io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;

	while (remaining) {
		/*
		 * Allocate a suitably sized bio: we add an extra
		 * bvec for bio_get/set_region() and decrement bi_max_vecs
		 * to hide it from bio_add_page().
		 */
		num_bvecs = (remaining / (PAGE_SIZE >> SECTOR_SHIFT)) + 2;
		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, bios(io->client));
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio->bi_private = io;
		bio->bi_destructor = dm_bio_destructor;
		bio->bi_max_vecs--;
		bio_set_region(bio, region);

		/*
		 * Try and add as many pages as possible.
		 */
		while (remaining) {
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	}
}

static void dispatch_io(int rw, unsigned int num_regions,
			struct io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	if (sync)
		rw |= (1 << BIO_RW_SYNC);

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count)
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	struct io io;

	if (num_regions > 1 && rw != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io.error = 0;
	atomic_set(&io.count, 1); /* see dispatch_io() */
	io.sleeper = current;
	io.client = client;

	dispatch_io(rw, num_regions, where, dp, &io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io.count) || signal_pending(current))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (atomic_read(&io.count))
		return -EINTR;

	if (error_bits)
		*error_bits = io.error;

	return io.error ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && rw != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(io_pool(client), GFP_NOIO);
	io->error = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw,
	       struct page_list *pl, unsigned int offset,
	       unsigned long *error_bits)
{
	struct dpages dp;
	list_dp_init(&dp, pl, offset);
	return sync_io(NULL, num_regions, where, rw, &dp, error_bits);
}

int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw,
		    struct bio_vec *bvec, unsigned long *error_bits)
{
	struct dpages dp;
	bvec_dp_init(&dp, bvec);
	return sync_io(NULL, num_regions, where, rw, &dp, error_bits);
}

int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw,
		  void *data, unsigned long *error_bits)
{
	struct dpages dp;
	vm_dp_init(&dp, data);
	return sync_io(NULL, num_regions, where, rw, &dp, error_bits);
}
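
/*
 * Illustrative sketch, not part of the original file: reading the
 * first 8 sectors of a device into a vmalloc'ed buffer with the
 * synchronous vm variant.  Names are hypothetical, and the caller
 * would need <linux/vmalloc.h> for vmalloc()/vfree().
 */
#if 0
static int example_sync_read(struct block_device *bdev)
{
	unsigned long error_bits;
	int r;
	struct io_region where = {
		.bdev   = bdev,
		.sector = 0,
		.count  = 8,		/* sectors */
	};
	void *buf = vmalloc(8 << SECTOR_SHIFT);

	if (!buf)
		return -ENOMEM;

	/* Blocks until completion; one error bit per region. */
	r = dm_io_sync_vm(1, &where, READ, buf, &error_bits);

	/* ... use buf on success ... */
	vfree(buf);
	return r;
}
#endif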
int dm_io_async(unsigned int num_regions, struct io_region *where, int rw,
		struct page_list *pl, unsigned int offset,
		io_notify_fn fn, void *context)
{
	struct dpages dp;
	list_dp_init(&dp, pl, offset);
	return async_io(NULL, num_regions, where, rw, &dp, fn, context);
}

int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw,
		     struct bio_vec *bvec, io_notify_fn fn, void *context)
{
	struct dpages dp;
	bvec_dp_init(&dp, bvec);
	return async_io(NULL, num_regions, where, rw, &dp, fn, context);
}

int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw,
		   void *data, io_notify_fn fn, void *context)
{
	struct dpages dp;
	vm_dp_init(&dp, data);
	return async_io(NULL, num_regions, where, rw, &dp, fn, context);
}
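
/*
 * Illustrative sketch, not part of the original file: a mirrored
 * write to two regions with the asynchronous vm variant.  The
 * callback receives the accumulated error bits, one per region; all
 * names here are hypothetical.  Note that multi-region requests must
 * be WRITEs (see the check in async_io()).
 */
#if 0
static void example_write_done(unsigned long error, void *context)
{
	if (error)	/* bit n set => region n failed */
		printk(KERN_ERR "example: write failed: %#lx\n", error);
}

static int example_mirror_write(struct io_region where[2], void *data)
{
	return dm_io_async_vm(2, where, WRITE, data,
			      example_write_done, NULL);
}
#endif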
EXPORT_SYMBOL(dm_io_get);
EXPORT_SYMBOL(dm_io_put);
EXPORT_SYMBOL(dm_io_sync);
EXPORT_SYMBOL(dm_io_async);
EXPORT_SYMBOL(dm_io_sync_bvec);
EXPORT_SYMBOL(dm_io_async_bvec);
EXPORT_SYMBOL(dm_io_sync_vm);
EXPORT_SYMBOL(dm_io_async_vm);