|
@@ -1808,25 +1808,35 @@ void elv_dequeue_request(struct request_queue *q, struct request *rq)
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
- * __end_that_request_first - end I/O on a request
|
|
|
- * @req: the request being processed
|
|
|
+ * blk_update_request - Special helper function for request stacking drivers
|
|
|
+ * @rq: the request being processed
|
|
|
* @error: %0 for success, < %0 for error
|
|
|
- * @nr_bytes: number of bytes to complete
|
|
|
+ * @nr_bytes: number of bytes to complete @rq
|
|
|
*
|
|
|
* Description:
|
|
|
- * Ends I/O on a number of bytes attached to @req, and sets it up
|
|
|
- * for the next range of segments (if any) in the cluster.
|
|
|
+ * Ends I/O on a number of bytes attached to @rq, but doesn't complete
|
|
|
+ * the request structure even if @rq doesn't have leftover.
|
|
|
+ * If @rq has leftover, sets it up for the next range of segments.
|
|
|
+ *
|
|
|
+ * This special helper function is only for request stacking drivers
|
|
|
+ * (e.g. request-based dm) so that they can handle partial completion.
|
|
|
+ * Actual device drivers should use blk_end_request instead.
|
|
|
+ *
|
|
|
+ * Passing the result of blk_rq_bytes() as @nr_bytes guarantees
|
|
|
+ * %false return from this function.
|
|
|
*
|
|
|
* Return:
|
|
|
- * %0 - we are done with this request, call end_that_request_last()
|
|
|
- * %1 - still buffers pending for this request
|
|
|
+ * %false - this request doesn't have any more data
|
|
|
+ * %true - this request has more data
|
|
|
**/
|
|
|
-static int __end_that_request_first(struct request *req, int error,
|
|
|
- int nr_bytes)
|
|
|
+bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
|
|
|
{
|
|
|
int total_bytes, bio_nbytes, next_idx = 0;
|
|
|
struct bio *bio;
|
|
|
|
|
|
+ if (!req->bio)
|
|
|
+ return false;
|
|
|
+
|
|
|
trace_block_rq_complete(req->q, req);
|
|
|
|
|
|
/*
|
|
@@ -1903,8 +1913,16 @@ static int __end_that_request_first(struct request *req, int error,
|
|
|
/*
|
|
|
* completely done
|
|
|
*/
|
|
|
- if (!req->bio)
|
|
|
- return 0;
|
|
|
+ if (!req->bio) {
|
|
|
+ /*
|
|
|
+ * Reset counters so that the request stacking driver
|
|
|
+ * can find how many bytes remain in the request
|
|
|
+ * later.
|
|
|
+ */
|
|
|
+ req->nr_sectors = req->hard_nr_sectors = 0;
|
|
|
+ req->current_nr_sectors = req->hard_cur_sectors = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
|
|
|
/*
|
|
|
* if the request wasn't completed, update state
|
|
@@ -1918,29 +1936,31 @@ static int __end_that_request_first(struct request *req, int error,
|
|
|
|
|
|
blk_recalc_rq_sectors(req, total_bytes >> 9);
|
|
|
blk_recalc_rq_segments(req);
|
|
|
- return 1;
|
|
|
+ return true;
|
|
|
}
|
|
|
+EXPORT_SYMBOL_GPL(blk_update_request);
|
|
|
|
|
|
-static int end_that_request_data(struct request *rq, int error,
|
|
|
- unsigned int nr_bytes, unsigned int bidi_bytes)
|
|
|
+static bool blk_update_bidi_request(struct request *rq, int error,
|
|
|
+ unsigned int nr_bytes,
|
|
|
+ unsigned int bidi_bytes)
|
|
|
{
|
|
|
- if (rq->bio) {
|
|
|
- if (__end_that_request_first(rq, error, nr_bytes))
|
|
|
- return 1;
|
|
|
+ if (blk_update_request(rq, error, nr_bytes))
|
|
|
+ return true;
|
|
|
|
|
|
- /* Bidi request must be completed as a whole */
|
|
|
- if (blk_bidi_rq(rq) &&
|
|
|
- __end_that_request_first(rq->next_rq, error, bidi_bytes))
|
|
|
- return 1;
|
|
|
- }
|
|
|
+ /* Bidi request must be completed as a whole */
|
|
|
+ if (unlikely(blk_bidi_rq(rq)) &&
|
|
|
+ blk_update_request(rq->next_rq, error, bidi_bytes))
|
|
|
+ return true;
|
|
|
|
|
|
- return 0;
|
|
|
+ add_disk_randomness(rq->rq_disk);
|
|
|
+
|
|
|
+ return false;
|
|
|
}
|
|
|
|
|
|
/*
|
|
|
* queue lock must be held
|
|
|
*/
|
|
|
-static void end_that_request_last(struct request *req, int error)
|
|
|
+static void blk_finish_request(struct request *req, int error)
|
|
|
{
|
|
|
if (blk_rq_tagged(req))
|
|
|
blk_queue_end_tag(req->q, req);
|
|
@@ -1966,161 +1986,65 @@ static void end_that_request_last(struct request *req, int error)
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
- * blk_end_io - Generic end_io function to complete a request.
|
|
|
- * @rq: the request being processed
|
|
|
- * @error: %0 for success, < %0 for error
|
|
|
- * @nr_bytes: number of bytes to complete @rq
|
|
|
- * @bidi_bytes: number of bytes to complete @rq->next_rq
|
|
|
+ * blk_end_bidi_request - Complete a bidi request
|
|
|
+ * @rq: the request to complete
|
|
|
+ * @error: %0 for success, < %0 for error
|
|
|
+ * @nr_bytes: number of bytes to complete @rq
|
|
|
+ * @bidi_bytes: number of bytes to complete @rq->next_rq
|
|
|
*
|
|
|
* Description:
|
|
|
* Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
|
|
|
- * If @rq has leftover, sets it up for the next range of segments.
|
|
|
+ * Drivers that support bidi can safely call this function for any
|
|
|
+ * type of request, bidi or uni. In the latter case @bidi_bytes is
|
|
|
+ * just ignored.
|
|
|
*
|
|
|
* Return:
|
|
|
- * %0 - we are done with this request
|
|
|
- * %1 - this request is not freed yet, it still has pending buffers.
|
|
|
+ * %false - we are done with this request
|
|
|
+ * %true - still buffers pending for this request
|
|
|
**/
|
|
|
-static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
|
|
|
- unsigned int bidi_bytes)
|
|
|
+bool blk_end_bidi_request(struct request *rq, int error,
|
|
|
+ unsigned int nr_bytes, unsigned int bidi_bytes)
|
|
|
{
|
|
|
struct request_queue *q = rq->q;
|
|
|
- unsigned long flags = 0UL;
|
|
|
-
|
|
|
- if (end_that_request_data(rq, error, nr_bytes, bidi_bytes))
|
|
|
- return 1;
|
|
|
+ unsigned long flags;
|
|
|
|
|
|
- add_disk_randomness(rq->rq_disk);
|
|
|
+ if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
|
|
|
+ return true;
|
|
|
|
|
|
spin_lock_irqsave(q->queue_lock, flags);
|
|
|
- end_that_request_last(rq, error);
|
|
|
+ blk_finish_request(rq, error);
|
|
|
spin_unlock_irqrestore(q->queue_lock, flags);
|
|
|
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-/**
|
|
|
- * blk_end_request - Helper function for drivers to complete the request.
|
|
|
- * @rq: the request being processed
|
|
|
- * @error: %0 for success, < %0 for error
|
|
|
- * @nr_bytes: number of bytes to complete
|
|
|
- *
|
|
|
- * Description:
|
|
|
- * Ends I/O on a number of bytes attached to @rq.
|
|
|
- * If @rq has leftover, sets it up for the next range of segments.
|
|
|
- *
|
|
|
- * Return:
|
|
|
- * %0 - we are done with this request
|
|
|
- * %1 - still buffers pending for this request
|
|
|
- **/
|
|
|
-int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
|
|
|
-{
|
|
|
- return blk_end_io(rq, error, nr_bytes, 0);
|
|
|
-}
|
|
|
-EXPORT_SYMBOL_GPL(blk_end_request);
|
|
|
-
|
|
|
-/**
|
|
|
- * __blk_end_request - Helper function for drivers to complete the request.
|
|
|
- * @rq: the request being processed
|
|
|
- * @error: %0 for success, < %0 for error
|
|
|
- * @nr_bytes: number of bytes to complete
|
|
|
- *
|
|
|
- * Description:
|
|
|
- * Must be called with queue lock held unlike blk_end_request().
|
|
|
- *
|
|
|
- * Return:
|
|
|
- * %0 - we are done with this request
|
|
|
- * %1 - still buffers pending for this request
|
|
|
- **/
|
|
|
-int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
|
|
|
-{
|
|
|
- if (rq->bio && __end_that_request_first(rq, error, nr_bytes))
|
|
|
- return 1;
|
|
|
-
|
|
|
- add_disk_randomness(rq->rq_disk);
|
|
|
-
|
|
|
- end_that_request_last(rq, error);
|
|
|
-
|
|
|
- return 0;
|
|
|
+ return false;
|
|
|
}
|
|
|
-EXPORT_SYMBOL_GPL(__blk_end_request);
|
|
|
+EXPORT_SYMBOL_GPL(blk_end_bidi_request);
|
|
|
|
|
|
/**
|
|
|
- * blk_end_bidi_request - Helper function for drivers to complete bidi request.
|
|
|
- * @rq: the bidi request being processed
|
|
|
+ * __blk_end_bidi_request - Complete a bidi request with queue lock held
|
|
|
+ * @rq: the request to complete
|
|
|
* @error: %0 for success, < %0 for error
|
|
|
* @nr_bytes: number of bytes to complete @rq
|
|
|
* @bidi_bytes: number of bytes to complete @rq->next_rq
|
|
|
*
|
|
|
* Description:
|
|
|
- * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
|
|
|
+ * Identical to blk_end_bidi_request() except that queue lock is
|
|
|
+ * assumed to be locked on entry and remains so on return.
|
|
|
*
|
|
|
* Return:
|
|
|
- * %0 - we are done with this request
|
|
|
- * %1 - still buffers pending for this request
|
|
|
- **/
|
|
|
-int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
|
|
|
- unsigned int bidi_bytes)
|
|
|
-{
|
|
|
- return blk_end_io(rq, error, nr_bytes, bidi_bytes);
|
|
|
-}
|
|
|
-EXPORT_SYMBOL_GPL(blk_end_bidi_request);
|
|
|
-
|
|
|
-/**
|
|
|
- * end_request - end I/O on the current segment of the request
|
|
|
- * @req: the request being processed
|
|
|
- * @uptodate: error value or %0/%1 uptodate flag
|
|
|
- *
|
|
|
- * Description:
|
|
|
- * Ends I/O on the current segment of a request. If that is the only
|
|
|
- * remaining segment, the request is also completed and freed.
|
|
|
- *
|
|
|
- * This is a remnant of how older block drivers handled I/O completions.
|
|
|
- * Modern drivers typically end I/O on the full request in one go, unless
|
|
|
- * they have a residual value to account for. For that case this function
|
|
|
- * isn't really useful, unless the residual just happens to be the
|
|
|
- * full current segment. In other words, don't use this function in new
|
|
|
- * code. Use blk_end_request() or __blk_end_request() to end a request.
|
|
|
+ * %false - we are done with this request
|
|
|
+ * %true - still buffers pending for this request
|
|
|
**/
|
|
|
-void end_request(struct request *req, int uptodate)
|
|
|
+bool __blk_end_bidi_request(struct request *rq, int error,
|
|
|
+ unsigned int nr_bytes, unsigned int bidi_bytes)
|
|
|
{
|
|
|
- int error = 0;
|
|
|
+ if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
|
|
|
+ return true;
|
|
|
|
|
|
- if (uptodate <= 0)
|
|
|
- error = uptodate ? uptodate : -EIO;
|
|
|
+ blk_finish_request(rq, error);
|
|
|
|
|
|
- __blk_end_request(req, error, req->hard_cur_sectors << 9);
|
|
|
-}
|
|
|
-EXPORT_SYMBOL(end_request);
|
|
|
-
|
|
|
-/**
|
|
|
- * blk_update_request - Special helper function for request stacking drivers
|
|
|
- * @rq: the request being processed
|
|
|
- * @error: %0 for success, < %0 for error
|
|
|
- * @nr_bytes: number of bytes to complete @rq
|
|
|
- *
|
|
|
- * Description:
|
|
|
- * Ends I/O on a number of bytes attached to @rq, but doesn't complete
|
|
|
- * the request structure even if @rq doesn't have leftover.
|
|
|
- * If @rq has leftover, sets it up for the next range of segments.
|
|
|
- *
|
|
|
- * This special helper function is only for request stacking drivers
|
|
|
- * (e.g. request-based dm) so that they can handle partial completion.
|
|
|
- * Actual device drivers should use blk_end_request instead.
|
|
|
- */
|
|
|
-void blk_update_request(struct request *rq, int error, unsigned int nr_bytes)
|
|
|
-{
|
|
|
- if (!end_that_request_data(rq, error, nr_bytes, 0)) {
|
|
|
- /*
|
|
|
- * These members are not updated in end_that_request_data()
|
|
|
- * when all bios are completed.
|
|
|
- * Update them so that the request stacking driver can find
|
|
|
- * how many bytes remain in the request later.
|
|
|
- */
|
|
|
- rq->nr_sectors = rq->hard_nr_sectors = 0;
|
|
|
- rq->current_nr_sectors = rq->hard_cur_sectors = 0;
|
|
|
- }
|
|
|
+ return false;
|
|
|
}
|
|
|
-EXPORT_SYMBOL_GPL(blk_update_request);
|
|
|
+EXPORT_SYMBOL_GPL(__blk_end_bidi_request);
|
|
|
|
|
|
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
|
|
|
struct bio *bio)
|