@@ -243,56 +243,3 @@ int bsg_setup_queue(struct device *dev, struct request_queue *q,
 	return 0;
 }
 EXPORT_SYMBOL_GPL(bsg_setup_queue);
-
-/**
- * bsg_remove_queue - Deletes the bsg dev from the q
- * @q: the request_queue that is to be torn down.
- *
- * Notes:
- *   Before unregistering the queue empty any requests that are blocked
- */
-void bsg_remove_queue(struct request_queue *q)
-{
-	struct request *req; /* block request */
-	int counts; /* totals for request_list count and starved */
-
-	if (!q)
-		return;
-
-	/* Stop taking in new requests */
-	spin_lock_irq(q->queue_lock);
-	blk_stop_queue(q);
-
-	/* drain all requests in the queue */
-	while (1) {
-		/* need the lock to fetch a request
-		 * this may fetch the same reqeust as the previous pass
-		 */
-		req = blk_fetch_request(q);
-		/* save requests in use and starved */
-		counts = q->rq.count[0] + q->rq.count[1] +
-			 q->rq.starved[0] + q->rq.starved[1];
-		spin_unlock_irq(q->queue_lock);
-		/* any requests still outstanding? */
-		if (counts == 0)
-			break;
-
-		/* This may be the same req as the previous iteration,
-		 * always send the blk_end_request_all after a prefetch.
-		 * It is not okay to not end the request because the
-		 * prefetch started the request.
-		 */
-		if (req) {
-			/* return -ENXIO to indicate that this queue is
-			 * going away
-			 */
-			req->errors = -ENXIO;
-			blk_end_request_all(req, -ENXIO);
-		}
-
-		msleep(200); /* allow bsg to possibly finish */
-		spin_lock_irq(q->queue_lock);
-	}
-	bsg_unregister_queue(q);
-}
-EXPORT_SYMBOL_GPL(bsg_remove_queue);
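
With bsg_remove_queue() gone, the one externally visible step its callers still need is the bsg_unregister_queue() call that formed the tail of the deleted body. A minimal teardown sketch follows; it is not part of this patch, "my_dev" and my_dev_remove() are hypothetical names, and it assumes the driver owns the request_queue it passed to bsg_setup_queue() and uses the legacy (pre-blk-mq) blk_cleanup_queue() path to tear the queue itself down:

#include <linux/blkdev.h>
#include <linux/bsg.h>

struct my_dev {				/* hypothetical driver state */
	struct request_queue *q;	/* queue handed to bsg_setup_queue() */
};

static void my_dev_remove(struct my_dev *md)
{
	bsg_unregister_queue(md->q);	/* was the tail of bsg_remove_queue() */
	blk_cleanup_queue(md->q);	/* standard legacy queue teardown */
}

The draining loop the helper performed is dropped entirely here rather than moved into the caller; blocked requests are left to be failed by the normal queue teardown path instead of being fetched and ended with -ENXIO one by one.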