@@ -1140,10 +1140,31 @@ static int blkvsc_do_request(struct block_device_context *blkdev,
 	return pending;
 }
 
+static int blkvsc_do_pending_reqs(struct block_device_context *blkdev)
+{
+	struct blkvsc_request *pend_req, *tmp;
+	int ret = 0;
+
+	/* Flush the pending list first */
+	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
+				 pend_entry) {
+		DPRINT_DBG(BLKVSC_DRV, "working off pending_list - %p\n",
+			   pend_req);
+
+		ret = blkvsc_submit_request(pend_req,
+					    blkvsc_request_completion);
+		if (ret != 0)
+			break;
+		else
+			list_del(&pend_req->pend_entry);
+	}
+
+	return ret;
+}
+
 /* Static decl */
 static int blkvsc_probe(struct device *dev);
 static void blkvsc_request(struct request_queue *queue);
-static int blkvsc_do_pending_reqs(struct block_device_context *blkdev);
 
 static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
 module_param(blkvsc_ringbuffer_size, int, S_IRUGO);
@@ -1470,28 +1491,6 @@ static void blkvsc_request_completion(struct hv_storvsc_request *request)
 	spin_unlock_irqrestore(&blkdev->lock, flags);
 }
 
-static int blkvsc_do_pending_reqs(struct block_device_context *blkdev)
-{
-	struct blkvsc_request *pend_req, *tmp;
-	int ret = 0;
-
-	/* Flush the pending list first */
-	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
-				 pend_entry) {
-		DPRINT_DBG(BLKVSC_DRV, "working off pending_list - %p\n",
-			   pend_req);
-
-		ret = blkvsc_submit_request(pend_req,
-					    blkvsc_request_completion);
-		if (ret != 0)
-			break;
-		else
-			list_del(&pend_req->pend_entry);
-	}
-
-	return ret;
-}
-
 static void blkvsc_request(struct request_queue *queue)
 {
 	struct block_device_context *blkdev = NULL;
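
For context, a minimal userspace sketch of the traversal pattern blkvsc_do_pending_reqs() relies on: walking a list while possibly unlinking and freeing the current node, which requires caching the next pointer before the node is touched, and stopping at the first submission failure so the remaining entries stay pending. The struct name pending_req, the submit() stub, and the failure condition below are illustrative assumptions only; the real driver uses the kernel's list_for_each_entry_safe() and list_del() on the doubly linked blkdev->pending_list.

/*
 * Userspace sketch (not driver code) of "iterate, submit, unlink on
 * success, stop on first failure". Names and failure policy details
 * here are assumptions for illustration.
 */
#include <stdio.h>
#include <stdlib.h>

struct pending_req {
	int id;
	struct pending_req *next;
};

/* Pretend submission: fail on id 3 to exercise the early-break path. */
static int submit(struct pending_req *req)
{
	return req->id == 3 ? -1 : 0;
}

int main(void)
{
	struct pending_req *head = NULL, *req, *tmp;
	int i, ret = 0;

	/* Build a small pending list; pushing at the head gives 5,4,3,2,1. */
	for (i = 1; i <= 5; i++) {
		req = malloc(sizeof(*req));
		req->id = i;
		req->next = head;
		head = req;
	}

	/*
	 * Cache the next node (tmp) before submit() so the current node can
	 * be unlinked and freed without losing our place; break on the first
	 * failure and leave the rest of the list untouched.
	 */
	for (req = head; req != NULL; req = tmp) {
		tmp = req->next;
		ret = submit(req);
		if (ret != 0)
			break;
		head = tmp;	/* unlink the node we just submitted */
		free(req);
	}

	for (req = head; req != NULL; req = req->next)
		printf("still pending: %d\n", req->id);

	return ret ? EXIT_FAILURE : EXIT_SUCCESS;
}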