/* blk-lib.c — generic block-layer helper functions (discard / zeroout). */
  1. /*
  2. * Functions related to generic helpers functions
  3. */
  4. #include <linux/kernel.h>
  5. #include <linux/module.h>
  6. #include <linux/bio.h>
  7. #include <linux/blkdev.h>
  8. #include <linux/scatterlist.h>
  9. #include "blk.h"
  10. static void blkdev_discard_end_io(struct bio *bio, int err)
  11. {
  12. if (err) {
  13. if (err == -EOPNOTSUPP)
  14. set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
  15. clear_bit(BIO_UPTODATE, &bio->bi_flags);
  16. }
  17. if (bio->bi_private)
  18. complete(bio->bi_private);
  19. bio_put(bio);
  20. }
  21. /**
  22. * blkdev_issue_discard - queue a discard
  23. * @bdev: blockdev to issue discard for
  24. * @sector: start sector
  25. * @nr_sects: number of sectors to discard
  26. * @gfp_mask: memory allocation flags (for bio_alloc)
  27. * @flags: BLKDEV_IFL_* flags to control behaviour
  28. *
  29. * Description:
  30. * Issue a discard request for the sectors in question.
  31. */
  32. int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
  33. sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
  34. {
  35. DECLARE_COMPLETION_ONSTACK(wait);
  36. struct request_queue *q = bdev_get_queue(bdev);
  37. int type = REQ_WRITE | REQ_DISCARD;
  38. unsigned int max_discard_sectors;
  39. struct bio *bio;
  40. int ret = 0;
  41. if (!q)
  42. return -ENXIO;
  43. if (!blk_queue_discard(q))
  44. return -EOPNOTSUPP;
  45. /*
  46. * Ensure that max_discard_sectors is of the proper
  47. * granularity
  48. */
  49. max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
  50. if (q->limits.discard_granularity) {
  51. unsigned int disc_sects = q->limits.discard_granularity >> 9;
  52. max_discard_sectors &= ~(disc_sects - 1);
  53. }
  54. if (flags & BLKDEV_DISCARD_SECURE) {
  55. if (!blk_queue_secdiscard(q))
  56. return -EOPNOTSUPP;
  57. type |= REQ_SECURE;
  58. }
  59. while (nr_sects && !ret) {
  60. bio = bio_alloc(gfp_mask, 1);
  61. if (!bio) {
  62. ret = -ENOMEM;
  63. break;
  64. }
  65. bio->bi_sector = sector;
  66. bio->bi_end_io = blkdev_discard_end_io;
  67. bio->bi_bdev = bdev;
  68. bio->bi_private = &wait;
  69. if (nr_sects > max_discard_sectors) {
  70. bio->bi_size = max_discard_sectors << 9;
  71. nr_sects -= max_discard_sectors;
  72. sector += max_discard_sectors;
  73. } else {
  74. bio->bi_size = nr_sects << 9;
  75. nr_sects = 0;
  76. }
  77. bio_get(bio);
  78. submit_bio(type, bio);
  79. wait_for_completion(&wait);
  80. if (bio_flagged(bio, BIO_EOPNOTSUPP))
  81. ret = -EOPNOTSUPP;
  82. else if (!bio_flagged(bio, BIO_UPTODATE))
  83. ret = -EIO;
  84. bio_put(bio);
  85. }
  86. return ret;
  87. }
  88. EXPORT_SYMBOL(blkdev_issue_discard);
/*
 * Tracks a batch of in-flight bios sharing one on-stack completion.
 * The issuer initialises @done to 1 (its own reference), increments it
 * once per submitted bio, and finally drops its reference; the last
 * decrementer signals @wait.
 */
struct bio_batch
{
	atomic_t done;		/* outstanding bios + one issuer reference */
	unsigned long flags;	/* BIO_UPTODATE / BIO_EOPNOTSUPP result bits */
	struct completion *wait;	/* signalled when done reaches zero */
};
  95. static void bio_batch_end_io(struct bio *bio, int err)
  96. {
  97. struct bio_batch *bb = bio->bi_private;
  98. if (err) {
  99. if (err == -EOPNOTSUPP)
  100. set_bit(BIO_EOPNOTSUPP, &bb->flags);
  101. else
  102. clear_bit(BIO_UPTODATE, &bb->flags);
  103. }
  104. if (bb)
  105. if (atomic_dec_and_test(&bb->done))
  106. complete(bb->wait);
  107. bio_put(bio);
  108. }
  109. /**
  110. * blkdev_issue_zeroout - generate number of zero filed write bios
  111. * @bdev: blockdev to issue
  112. * @sector: start sector
  113. * @nr_sects: number of sectors to write
  114. * @gfp_mask: memory allocation flags (for bio_alloc)
  115. *
  116. * Description:
  117. * Generate and issue number of bios with zerofiled pages.
  118. */
  119. int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
  120. sector_t nr_sects, gfp_t gfp_mask)
  121. {
  122. int ret;
  123. struct bio *bio;
  124. struct bio_batch bb;
  125. unsigned int sz;
  126. DECLARE_COMPLETION_ONSTACK(wait);
  127. atomic_set(&bb.done, 1);
  128. bb.flags = 1 << BIO_UPTODATE;
  129. bb.wait = &wait;
  130. submit:
  131. ret = 0;
  132. while (nr_sects != 0) {
  133. bio = bio_alloc(gfp_mask,
  134. min(nr_sects, (sector_t)BIO_MAX_PAGES));
  135. if (!bio) {
  136. ret = -ENOMEM;
  137. break;
  138. }
  139. bio->bi_sector = sector;
  140. bio->bi_bdev = bdev;
  141. bio->bi_end_io = bio_batch_end_io;
  142. bio->bi_private = &bb;
  143. while (nr_sects != 0) {
  144. sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
  145. if (sz == 0)
  146. /* bio has maximum size possible */
  147. break;
  148. ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
  149. nr_sects -= ret >> 9;
  150. sector += ret >> 9;
  151. if (ret < (sz << 9))
  152. break;
  153. }
  154. ret = 0;
  155. atomic_inc(&bb.done);
  156. submit_bio(WRITE, bio);
  157. }
  158. /* Wait for bios in-flight */
  159. if (!atomic_dec_and_test(&bb.done))
  160. wait_for_completion(&wait);
  161. if (!test_bit(BIO_UPTODATE, &bb.flags))
  162. /* One of bios in the batch was completed with error.*/
  163. ret = -EIO;
  164. if (ret)
  165. goto out;
  166. if (test_bit(BIO_EOPNOTSUPP, &bb.flags)) {
  167. ret = -EOPNOTSUPP;
  168. goto out;
  169. }
  170. if (nr_sects != 0)
  171. goto submit;
  172. out:
  173. return ret;
  174. }
  175. EXPORT_SYMBOL(blkdev_issue_zeroout);