blk-lib.c

/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

struct bio_batch {
	atomic_t		done;
	unsigned long		flags;
	struct completion	*wait;
};

static void bio_batch_end_io(struct bio *bio, int err)
{
	struct bio_batch *bb = bio->bi_private;

	if (err && (err != -EOPNOTSUPP))
		clear_bit(BIO_UPTODATE, &bb->flags);
	if (atomic_dec_and_test(&bb->done))
		complete(bb->wait);
	bio_put(bio);
}

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	sector_t max_discard_sectors;
	sector_t granularity, alignment;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = bdev_discard_alignment(bdev) >> 9;
	alignment = sector_div(alignment, granularity);

	/*
	 * Ensure that max_discard_sectors is of the proper
	 * granularity, so that requests stay aligned after a split.
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	sector_div(max_discard_sectors, granularity);
	max_discard_sectors *= granularity;
	if (unlikely(!max_discard_sectors)) {
		/* Avoid infinite loop below. Being cautious never hurts. */
		return -EOPNOTSUPP;
	}

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}

		bio->bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;

		bio->bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		atomic_inc(&bb.done);

		submit_bio(type, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -EIO;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
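
/*
 * Illustrative example (not part of the original file): a filesystem that
 * has just freed an extent might discard it like this, where sb, start and
 * len are hypothetical caller-side variables:
 *
 *	int err = blkdev_issue_discard(sb->s_bdev, start, len, GFP_NOFS, 0);
 *	if (err == -EOPNOTSUPP)
 *		err = 0;	(device has no discard support; safe to ignore)
 *
 * Passing BLKDEV_DISCARD_SECURE in @flags requests a secure discard, which
 * the code above only honors when blk_queue_secdiscard() reports support.
 */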

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	max_write_same_sectors = q->limits.max_write_same_sectors;

	if (max_write_same_sectors == 0)
		return -EOPNOTSUPP;

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	while (nr_sects) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		atomic_inc(&bb.done);
		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -ENOTSUPP;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
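
/*
 * Illustrative example (not part of the original file): on a device that
 * reports a non-zero max_write_same_sectors queue limit, a caller such as
 * blkdev_issue_zeroout() below can replicate a single pattern page across
 * a whole range, e.g.:
 *
 *	ret = blkdev_issue_write_same(bdev, sector, nr_sects, GFP_NOFS,
 *				      ZERO_PAGE(0));
 *
 * Only the first logical block of @page is used as the pattern; its length
 * is set to bdev_logical_block_size(bdev) in the loop above.
 */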

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Generate and issue a number of bios with zero-filled pages.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_private = &bb;

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		ret = 0;
		atomic_inc(&bb.done);
		submit_bio(WRITE, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of bios in the batch was completed with error.*/
		ret = -EIO;

	return ret;
}
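
/*
 * Sizing note (illustrative, assuming 4 KiB pages and a BIO_MAX_PAGES of
 * 256): each bio built above carries at most 256 references to ZERO_PAGE(0),
 * i.e. up to 256 * 8 = 2048 sectors (1 MiB), so zeroing a 3 MiB range would
 * submit three bios before the final wait_for_completion(). bio_add_page()
 * may stop a bio earlier if queue limits are hit, in which case the outer
 * loop simply allocates another bio.
 */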

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Zero-fill a block range, using a WRITE SAME command when the device
 *  supports it and falling back to zero-filled write bios otherwise.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask)
{
	if (bdev_write_same(bdev)) {
		unsigned char bdn[BDEVNAME_SIZE];

		if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
					     ZERO_PAGE(0)))
			return 0;

		bdevname(bdev, bdn);
		pr_err("%s: WRITE SAME failed. Manually zeroing.\n", bdn);
	}

	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
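
/*
 * Illustrative example (not part of the original file): zeroing a range
 * with a single call, preferring the device's WRITE SAME path and
 * transparently falling back to writes of ZERO_PAGE(0):
 *
 *	int err = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_NOFS);
 *
 * A failed WRITE SAME only logs "WRITE SAME failed. Manually zeroing." and
 * retries via __blkdev_issue_zeroout(), so the caller sees one return code
 * for the whole range.
 */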