/*
 * blk-lib.c - functions related to generic block layer helpers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static void blkdev_discard_end_io(struct bio *bio, int err)
{
	if (err) {
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	}

	if (bio->bi_private)
		complete(bio->bi_private);
	__free_page(bio_page(bio));

	bio_put(bio);
}

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = flags & BLKDEV_IFL_BARRIER ?
		DISCARD_BARRIER : DISCARD_NOBARRIER;
	struct bio *bio;
	struct page *page;
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	while (nr_sects && !ret) {
		unsigned int sector_size = q->limits.logical_block_size;
		unsigned int max_discard_sectors =
			min(q->limits.max_discard_sectors, UINT_MAX >> 9);

		bio = bio_alloc(gfp_mask, 1);
		if (!bio)
			goto out;
		bio->bi_sector = sector;
		bio->bi_end_io = blkdev_discard_end_io;
		bio->bi_bdev = bdev;
		if (flags & BLKDEV_IFL_WAIT)
			bio->bi_private = &wait;

		/*
		 * Add a zeroed one-sector payload, as that is what our
		 * current implementations need. If we ever need more,
		 * the interface will have to be revisited.
		 */
		page = alloc_page(gfp_mask | __GFP_ZERO);
		if (!page)
			goto out_free_bio;
		if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
			goto out_free_page;

		/*
		 * Override the bio size - a discard touches many more
		 * blocks on disk than the actual payload length.
		 */
		if (nr_sects > max_discard_sectors) {
			bio->bi_size = max_discard_sectors << 9;
			nr_sects -= max_discard_sectors;
			sector += max_discard_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		/* Hold an extra reference so the flags survive completion. */
		bio_get(bio);
		submit_bio(type, bio);

		if (flags & BLKDEV_IFL_WAIT)
			wait_for_completion(&wait);

		if (bio_flagged(bio, BIO_EOPNOTSUPP))
			ret = -EOPNOTSUPP;
		else if (!bio_flagged(bio, BIO_UPTODATE))
			ret = -EIO;
		bio_put(bio);
	}
	return ret;
out_free_page:
	__free_page(page);
out_free_bio:
	bio_put(bio);
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(blkdev_issue_discard);
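
/*
 * Illustrative sketch only, not part of the original file: one way a
 * caller might discard a range synchronously and fall back to writing
 * zeroes when the device lacks discard support. The helper name and the
 * fallback policy are assumptions for illustration, not an in-tree API.
 */
#if 0	/* example usage, not compiled */
static int example_discard_range(struct block_device *bdev,
				 sector_t sector, sector_t nr_sects)
{
	int ret;

	/* Synchronous discard: wait for completion before returning. */
	ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL,
				   BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
	if (ret == -EOPNOTSUPP)
		/* Device cannot discard; explicitly zero the range instead. */
		ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
					   GFP_KERNEL, BLKDEV_IFL_WAIT);
	return ret;
}
#endif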

struct bio_batch {
	atomic_t		done;
	unsigned long		flags;
	struct completion	*wait;
	bio_end_io_t		*end_io;
};

static void bio_batch_end_io(struct bio *bio, int err)
{
	struct bio_batch *bb = bio->bi_private;

	/*
	 * bi_private is only set when the submitter waits, so the batch
	 * may be absent; only touch it when it is actually there.
	 */
	if (bb) {
		if (err) {
			if (err == -EOPNOTSUPP)
				set_bit(BIO_EOPNOTSUPP, &bb->flags);
			else
				clear_bit(BIO_UPTODATE, &bb->flags);
		}
		if (bb->end_io)
			bb->end_io(bio, err);
		atomic_inc(&bb->done);
		complete(bb->wait);
	}
	bio_put(bio);
}
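
/*
 * Illustrative sketch only, not part of the original file: the bio_batch
 * pattern in miniature - initialize the batch, point each bio's bi_private
 * at it, then wait until every completion has signalled. The helper name
 * is hypothetical; the field setup mirrors blkdev_issue_zeroout() below.
 */
#if 0	/* example usage, not compiled */
static void example_batch_wait(struct bio **bios, unsigned int nr)
{
	unsigned int i;
	struct bio_batch bb;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 0);
	bb.flags = 1 << BIO_UPTODATE;	/* assume success until told otherwise */
	bb.wait = &wait;
	bb.end_io = NULL;		/* no private completion hook */

	for (i = 0; i < nr; i++) {
		bios[i]->bi_end_io = bio_batch_end_io;
		bios[i]->bi_private = &bb;
		submit_bio(WRITE, bios[i]);
	}

	/* Each completion calls complete(); wake once per finished bio. */
	while (atomic_read(&bb.done) != nr)
		wait_for_completion(&wait);
}
#endif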

/**
 * blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue the writes against
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Generate and issue a number of bios with zero-filled pages.
 *    If requested, send a barrier at the beginning and at the end;
 *    this guarantees correct request ordering. An empty barrier
 *    lets us avoid a post-queue flush.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	int ret = 0;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz, issued = 0;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 0);
	bb.flags = 1 << BIO_UPTODATE;	/* assume success until told otherwise */
	bb.wait = &wait;
	bb.end_io = NULL;

	if (flags & BLKDEV_IFL_BARRIER) {
		/* issue an async barrier before the data */
		ret = blkdev_issue_flush(bdev, gfp_mask, NULL, 0);
		if (ret)
			return ret;
	}
submit:
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio)
			break;

		bio->bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_end_io = bio_batch_end_io;
		if (flags & BLKDEV_IFL_WAIT)
			bio->bi_private = &bb;

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			if (sz == 0)
				/* bio has maximum size possible */
				break;
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		/*
		 * bio_add_page() returned a byte count, not an errno;
		 * reset ret so the error checks below are not confused.
		 */
		ret = 0;
		issued++;
		submit_bio(WRITE, bio);
	}

	/*
	 * All data bios are now in flight; send the final barrier
	 * if requested.
	 */
	if (nr_sects == 0 && flags & BLKDEV_IFL_BARRIER)
		ret = blkdev_issue_flush(bdev, gfp_mask, NULL,
					flags & BLKDEV_IFL_WAIT);

	if (flags & BLKDEV_IFL_WAIT)
		/* Wait for the bios still in flight. */
		while (issued != atomic_read(&bb.done))
			wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of the bios in the batch completed with an error. */
		ret = -EIO;
	if (ret)
		goto out;

	if (test_bit(BIO_EOPNOTSUPP, &bb.flags)) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	if (nr_sects != 0)
		goto submit;
out:
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
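
/*
 * Illustrative sketch only, not part of the original file: zeroing the
 * first megabyte of a device, e.g. to clear a stale partition table.
 * The helper name and the 1 MiB constant are assumptions for
 * illustration.
 */
#if 0	/* example usage, not compiled */
static int example_zero_first_mib(struct block_device *bdev)
{
	/* 1 MiB expressed in 512-byte sectors. */
	sector_t nr_sects = (1024 * 1024) >> 9;

	/*
	 * Wait for all zero-filled bios to complete and bracket them
	 * with barriers so the zeroes are ordered against other I/O.
	 */
	return blkdev_issue_zeroout(bdev, 0, nr_sects, GFP_KERNEL,
				    BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
}
#endif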