/*
 * Functions related to barrier IO handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>

#include "blk.h"

/*
 * Cache flushing for ordered writes handling
 */
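/*
 * blk_ordered_cur_seq - return the current step of the ordered sequence
 * @q: the request queue
 *
 * Returns the QUEUE_ORDSEQ_* flag of the lowest step that has not yet
 * completed, or 0 if no ordered sequence is in progress.
 */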
unsigned blk_ordered_cur_seq(struct request_queue *q)
{
        if (!q->ordseq)
                return 0;
        return 1 << ffz(q->ordseq);
}
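
/*
 * blk_ordered_req_seq - return the ordered sequence step a request belongs to
 * @rq: the request to classify
 *
 * The embedded pre-flush, barrier and post-flush requests map to their
 * own steps; every other request either drains ahead of the sequence or
 * completes after it, depending on its type and ordered color.
 */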
unsigned blk_ordered_req_seq(struct request *rq)
{
        struct request_queue *q = rq->q;

        BUG_ON(q->ordseq == 0);

        if (rq == &q->pre_flush_rq)
                return QUEUE_ORDSEQ_PREFLUSH;
        if (rq == &q->bar_rq)
                return QUEUE_ORDSEQ_BAR;
        if (rq == &q->post_flush_rq)
                return QUEUE_ORDSEQ_POSTFLUSH;

        /*
         * !fs requests don't need to follow barrier ordering.  Always
         * put them at the front.  This fixes the following deadlock.
         *
         * http://thread.gmane.org/gmane.linux.kernel/537473
         */
        if (rq->cmd_type != REQ_TYPE_FS)
                return QUEUE_ORDSEQ_DRAIN;

        if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
            (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
                return QUEUE_ORDSEQ_DRAIN;
        else
                return QUEUE_ORDSEQ_DONE;
}
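
/*
 * blk_ordered_complete_seq - record completion of ordered sequence steps
 * @q: the request queue
 * @seq: QUEUE_ORDSEQ_* flags of the step(s) that just completed
 * @error: completion error, if any
 *
 * Marks @seq complete and, once every step has finished, ends the
 * original barrier request with the first error seen.  Returns %true
 * when the whole sequence is complete.
 */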
bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
{
        struct request *rq;

        if (error && !q->orderr)
                q->orderr = error;

        BUG_ON(q->ordseq & seq);
        q->ordseq |= seq;

        if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
                return false;

        /*
         * Okay, sequence complete.
         */
        q->ordseq = 0;
        rq = q->orig_bar_rq;
        __blk_end_request_all(rq, q->orderr);
        return true;
}
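
/*
 * Completion callbacks for the proxy requests that make up an ordered
 * sequence.  Each notifies the elevator and marks its own step of the
 * sequence as complete.
 */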
static void pre_flush_end_io(struct request *rq, int error)
{
        elv_completed_request(rq->q, rq);
        blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
}

static void bar_end_io(struct request *rq, int error)
{
        elv_completed_request(rq->q, rq);
        blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
}

static void post_flush_end_io(struct request *rq, int error)
{
        elv_completed_request(rq->q, rq);
        blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
}
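
/*
 * queue_flush - queue the pre- or post-flush proxy request
 * @q: the request queue
 * @which: QUEUE_ORDERED_DO_PREFLUSH or QUEUE_ORDERED_DO_POSTFLUSH
 *
 * Initializes the matching flush request embedded in @q and inserts it
 * at the front of the dispatch queue.
 */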
static void queue_flush(struct request_queue *q, unsigned which)
{
        struct request *rq;
        rq_end_io_fn *end_io;

        if (which == QUEUE_ORDERED_DO_PREFLUSH) {
                rq = &q->pre_flush_rq;
                end_io = pre_flush_end_io;
        } else {
                rq = &q->post_flush_rq;
                end_io = post_flush_end_io;
        }

        blk_rq_init(q, rq);
        rq->cmd_type = REQ_TYPE_FS;
        rq->cmd_flags = REQ_HARDBARRIER | REQ_FLUSH;
        rq->rq_disk = q->orig_bar_rq->rq_disk;
        rq->end_io = end_io;

        elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}
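
/*
 * start_ordered - begin a new ordered sequence for a barrier request
 * @q: the request queue
 * @rq: the barrier request that triggered the sequence
 *
 * Stashes @rq away as q->orig_bar_rq and queues the pre-flush, barrier
 * and post-flush proxy requests required by q->ordered at the front of
 * the dispatch queue.  Returns the request to dispatch next, or %NULL
 * if nothing can be dispatched yet or the sequence already completed.
 */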
static inline struct request *start_ordered(struct request_queue *q,
                                            struct request *rq)
{
        unsigned skip = 0;

        q->orderr = 0;
        q->ordered = q->next_ordered;
        q->ordseq |= QUEUE_ORDSEQ_STARTED;

        /*
         * For an empty barrier, there's no actual BAR request, which
         * in turn makes POSTFLUSH unnecessary.  Mask them off.
         */
        if (!blk_rq_sectors(rq))
                q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
                                QUEUE_ORDERED_DO_POSTFLUSH);

        /* stash away the original request */
        blk_dequeue_request(rq);
        q->orig_bar_rq = rq;
        rq = NULL;

        /*
         * Queue the ordered sequence.  As we stack the requests at the
         * head, we need to queue them in reverse order.  Note that we
         * rely on the fact that no fs request uses ELEVATOR_INSERT_FRONT
         * and thus no fs request gets in between the ordered sequence.
         */
        if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) {
                queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
                rq = &q->post_flush_rq;
        } else
                skip |= QUEUE_ORDSEQ_POSTFLUSH;

        if (q->ordered & QUEUE_ORDERED_DO_BAR) {
                rq = &q->bar_rq;

                /* initialize proxy request and queue it */
                blk_rq_init(q, rq);
                init_request_from_bio(rq, q->orig_bar_rq->bio);
                if (q->ordered & QUEUE_ORDERED_DO_FUA)
                        rq->cmd_flags |= REQ_FUA;
                rq->end_io = bar_end_io;

                elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
        } else
                skip |= QUEUE_ORDSEQ_BAR;

        if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
                queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
                rq = &q->pre_flush_rq;
        } else
                skip |= QUEUE_ORDSEQ_PREFLUSH;

        if (queue_in_flight(q))
                rq = NULL;
        else
                skip |= QUEUE_ORDSEQ_DRAIN;

        /*
         * Complete skipped sequences.  If the whole sequence is complete,
         * return %NULL to tell the elevator that this request is gone.
         */
        if (blk_ordered_complete_seq(q, skip, 0))
                rq = NULL;
        return rq;
}
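
/*
 * blk_do_ordered - apply barrier ordering to a request being dispatched
 * @q: the request queue
 * @rq: the request at the head of the dispatch queue
 *
 * Returns the request to dispatch next: @rq itself if it is not subject
 * to ordering, a proxy request if a new ordered sequence is started,
 * %NULL if @rq has been consumed or failed with -EOPNOTSUPP, or
 * ERR_PTR(-EAGAIN) if @rq must wait for its turn in the sequence.
 */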
struct request *blk_do_ordered(struct request_queue *q, struct request *rq)
{
        const int is_barrier = rq->cmd_type == REQ_TYPE_FS &&
                               (rq->cmd_flags & REQ_HARDBARRIER);

        if (!q->ordseq) {
                if (!is_barrier)
                        return rq;

                if (q->next_ordered != QUEUE_ORDERED_NONE)
                        return start_ordered(q, rq);
                else {
                        /*
                         * Queue ordering not supported.  Terminate
                         * with prejudice.
                         */
                        blk_dequeue_request(rq);
                        __blk_end_request_all(rq, -EOPNOTSUPP);
                        return NULL;
                }
        }

        /*
         * Ordered sequence in progress
         */

        /* Special requests are not subject to ordering rules. */
        if (rq->cmd_type != REQ_TYPE_FS &&
            rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
                return rq;

        /* Ordered by draining.  Wait for turn. */
        WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
        if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
                rq = ERR_PTR(-EAGAIN);

        return rq;
}
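
/*
 * Completion handler for the empty barrier bio issued by
 * blkdev_issue_flush().  Records -EOPNOTSUPP and I/O errors in the bio
 * flags and wakes up the waiter, if there is one.
 */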
static void bio_end_empty_barrier(struct bio *bio, int err)
{
        if (err) {
                if (err == -EOPNOTSUPP)
                        set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
                clear_bit(BIO_UPTODATE, &bio->bi_flags);
        }
        if (bio->bi_private)
                complete(bio->bi_private);
        bio_put(bio);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev: blockdev to issue flush for
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @error_sector: error sector
 * @flags: BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a flush for the block device in question.  The caller can
 *    supply room for storing the error offset in case of a flush error,
 *    if they wish to.  If the WAIT flag is not passed, the caller can
 *    only check that the request was pushed to some internal queue for
 *    later handling; no completion status is reported.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
                sector_t *error_sector, unsigned long flags)
{
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q;
        struct bio *bio;
        int ret = 0;

        if (bdev->bd_disk == NULL)
                return -ENXIO;

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        /*
         * some block devices may not have their queue correctly set up here
         * (e.g. loop device without a backing file) and so issuing a flush
         * here will panic. Ensure there is a request function before issuing
         * the barrier.
         */
        if (!q->make_request_fn)
                return -ENXIO;

        bio = bio_alloc(gfp_mask, 0);
        bio->bi_end_io = bio_end_empty_barrier;
        bio->bi_bdev = bdev;
        if (test_bit(BLKDEV_WAIT, &flags))
                bio->bi_private = &wait;

        bio_get(bio);
        submit_bio(WRITE_BARRIER, bio);
        if (test_bit(BLKDEV_WAIT, &flags)) {
                wait_for_completion(&wait);
                /*
                 * The driver must store the error location in ->bi_sector, if
                 * it supports it. For non-stacked drivers, this should be
                 * copied from blk_rq_pos(rq).
                 */
                if (error_sector)
                        *error_sector = bio->bi_sector;
        }

        if (bio_flagged(bio, BIO_EOPNOTSUPP))
                ret = -EOPNOTSUPP;
        else if (!bio_flagged(bio, BIO_UPTODATE))
                ret = -EIO;

        bio_put(bio);
        return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);