@@ -348,15 +348,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		 * A block was successfully transferred.
 		 */
 		spin_lock_irq(&md->lock);
-		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
-		if (!ret) {
-			/*
-			 * The whole request completed successfully.
-			 */
-			add_disk_randomness(req->rq_disk);
-			blkdev_dequeue_request(req);
-			end_that_request_last(req, 1);
-		}
+		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
 		spin_unlock_irq(&md->lock);
 	} while (ret);
 
@@ -386,27 +378,21 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		else
 			bytes = blocks << 9;
 		spin_lock_irq(&md->lock);
-		ret = end_that_request_chunk(req, 1, bytes);
+		ret = __blk_end_request(req, 0, bytes);
 		spin_unlock_irq(&md->lock);
 		}
 	} else if (rq_data_dir(req) != READ &&
 		   (card->host->caps & MMC_CAP_MULTIWRITE)) {
 		spin_lock_irq(&md->lock);
-		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
+		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
 		spin_unlock_irq(&md->lock);
 	}
 
 	mmc_release_host(card->host);
 
 	spin_lock_irq(&md->lock);
-	while (ret) {
-		ret = end_that_request_chunk(req, 0,
-				req->current_nr_sectors << 9);
-	}
-
-	add_disk_randomness(req->rq_disk);
-	blkdev_dequeue_request(req);
-	end_that_request_last(req, 0);
+	while (ret)
+		ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
 	spin_unlock_irq(&md->lock);
 
 	return 0;
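
The conversion pattern is the same in every hunk above, so a schematic sketch may help. The snippet below is illustrative kernel-style C, not a buildable excerpt of the driver; "q" and "bytes" are stand-ins (in this driver the queue lock is md->lock). The old interface took a 1/0 "uptodate" flag and, once end_that_request_chunk() reported the request finished, left the driver to add disk randomness, dequeue the request and call end_that_request_last() by hand. __blk_end_request() takes 0 or a negative errno instead (hence the -EIO in the failure loop), still expects the caller to hold the queue lock (blk_end_request() is the variant that takes the lock itself), does that final bookkeeping internally, and returns nonzero only while part of the request remains to be completed.

	/* Before: per-chunk completion plus a hand-rolled "request is done" path. */
	spin_lock_irq(q->queue_lock);
	ret = end_that_request_chunk(req, 1, bytes);	/* 1 == uptodate/success */
	if (!ret) {
		/* whole request finished: driver completes it itself */
		add_disk_randomness(req->rq_disk);
		blkdev_dequeue_request(req);
		end_that_request_last(req, 1);
	}
	spin_unlock_irq(q->queue_lock);

	/* After: one call; 0 for success, -errno for failure; lock held by caller. */
	spin_lock_irq(q->queue_lock);
	ret = __blk_end_request(req, 0, bytes);		/* nonzero => more of req remains */
	spin_unlock_irq(q->queue_lock);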