
block: implement request_queue->dma_drain_needed

Draining shouldn't be done for commands where overflow may indicate
data integrity issues.  Add a dma_drain_needed callback to
request_queue.  The drain buffer is appended iff this callback returns
non-zero.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Tejun Heo
commit 2fb98e8414

3 changed files with 11 additions and 5 deletions:

  1. block/blk-merge.c (+1, -1)
  2. block/blk-settings.c (+5, -2)
  3. include/linux/blkdev.h (+5, -2)
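
For orientation before the hunks: a minimal sketch of the driver-side usage this commit enables. The names below (my_drain_needed, MY_DRAIN_SIZE, my_setup_queue) are hypothetical and not part of this patch; in the surrounding series, libata's ATAPI handling is the kind of user this hook is aimed at.

#include <linux/blkdev.h>
#include <linux/slab.h>

#define MY_DRAIN_SIZE	4096	/* hypothetical drain buffer size */

/*
 * Matches dma_drain_needed_fn: return non-zero when appending the
 * drain buffer is safe for this request (e.g. packet commands where
 * trailing garbage is harmless), zero when overflow could mask a
 * data integrity problem.
 */
static int my_drain_needed(struct request *rq)
{
	return !blk_fs_request(rq);
}

static int my_setup_queue(struct request_queue *q)
{
	void *buf = kmalloc(MY_DRAIN_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/*
	 * Reserves one segment and registers the callback; the drain
	 * segment is then appended only to requests for which
	 * my_drain_needed(rq) returns non-zero.
	 */
	return blk_queue_dma_drain(q, my_drain_needed, buf, MY_DRAIN_SIZE);
}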

block/blk-merge.c (+1, -1)

@@ -220,7 +220,7 @@ new_segment:
 		bvprv = bvec;
 	} /* segments in rq */
 
-	if (q->dma_drain_size) {
+	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
 		sg->page_link &= ~0x02;
 		sg = sg_next(sg);
 		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
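
The hunk above is truncated mid-call; for context, here is the drain-append tail of blk_rq_map_sg as it reads after this patch, reconstructed from the surrounding file of this era (treat the exact lines as a best-effort reconstruction):

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		/* clear the end-of-list marker so one more entry fits */
		sg->page_link &= ~0x02;
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
	}

The only behavioral change is the new q->dma_drain_needed(rq) gate: the drain becomes a per-request decision instead of applying unconditionally whenever a drain buffer is configured.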

block/blk-settings.c (+5, -2)

@@ -296,6 +296,7 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
  * blk_queue_dma_drain - Set up a drain buffer for excess dma.
  *
  * @q:  the request queue for the device
+ * @dma_drain_needed: fn which returns non-zero if drain is necessary
  * @buf:	physically contiguous buffer
  * @size:	size of the buffer in bytes
  *
@@ -315,14 +316,16 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
  * device can support otherwise there won't be room for the drain
  * buffer.
  */
-int blk_queue_dma_drain(struct request_queue *q, void *buf,
-				unsigned int size)
+extern int blk_queue_dma_drain(struct request_queue *q,
+			       dma_drain_needed_fn *dma_drain_needed,
+			       void *buf, unsigned int size)
 {
 	if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
 		return -EINVAL;
 	/* make room for appending the drain */
 	--q->max_hw_segments;
 	--q->max_phys_segments;
+	q->dma_drain_needed = dma_drain_needed;
 	q->dma_drain_buffer = buf;
 	q->dma_drain_size = size;
 

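Read as a whole, the helper now looks as follows. Two notes: the stray extern on the definition in the hunk above is legal C (external linkage is the default for functions) but redundant, so it is dropped here; and the trailing return 0 is reconstructed from the unchanged remainder of the function, under the assumption this patch leaves it untouched.

int blk_queue_dma_drain(struct request_queue *q,
			dma_drain_needed_fn *dma_drain_needed,
			void *buf, unsigned int size)
{
	/* the drain takes a segment, so the device must spare two */
	if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
		return -EINVAL;
	/* make room for appending the drain */
	--q->max_hw_segments;
	--q->max_phys_segments;
	q->dma_drain_needed = dma_drain_needed;
	q->dma_drain_buffer = buf;
	q->dma_drain_size = size;

	return 0;
}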
include/linux/blkdev.h (+5, -2)

@@ -259,6 +259,7 @@ struct bio_vec;
 typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
 typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
+typedef int (dma_drain_needed_fn)(struct request *);
 
 enum blk_queue_state {
 	Queue_down,
@@ -295,6 +296,7 @@ struct request_queue
 	merge_bvec_fn		*merge_bvec_fn;
 	prepare_flush_fn	*prepare_flush_fn;
 	softirq_done_fn		*softirq_done_fn;
+	dma_drain_needed_fn	*dma_drain_needed;
 
 	/*
 	 * Dispatch queue sorting
@@ -699,8 +701,9 @@ extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
-extern int blk_queue_dma_drain(struct request_queue *q, void *buf,
-			       unsigned int size);
+extern int blk_queue_dma_drain(struct request_queue *q,
+			       dma_drain_needed_fn *dma_drain_needed,
+			       void *buf, unsigned int size);
 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
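
Note that, like the other *_fn typedefs in this header, dma_drain_needed_fn names a function type rather than a pointer type, so each use site adds the * itself (as in the new struct member above). A hypothetical forward declaration via the typedef:

dma_drain_needed_fn my_drain_needed;	/* same as: int my_drain_needed(struct request *); */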