@@ -387,7 +387,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 		if (!list_empty(&q->queue_head) && q->request_fn)
 			__blk_run_queue(q);
 
-		drain |= q->rq.elvpriv;
+		drain |= q->nr_rqs_elvpriv;
 
 		/*
 		 * Unfortunately, requests are queued at and tracked from
@@ -397,7 +397,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 		if (drain_all) {
 			drain |= !list_empty(&q->queue_head);
 			for (i = 0; i < 2; i++) {
-				drain |= q->rq.count[i];
+				drain |= q->nr_rqs[i];
 				drain |= q->in_flight[i];
 				drain |= !list_empty(&q->flush_queue[i]);
 			}
@@ -526,7 +526,6 @@ static int blk_init_free_list(struct request_queue *q)
 
 	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
 	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
-	rl->elvpriv = 0;
 	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
 	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
 
@@ -791,9 +790,10 @@ static void freed_request(struct request_queue *q, unsigned int flags)
 	struct request_list *rl = &q->rq;
 	int sync = rw_is_sync(flags);
 
+	q->nr_rqs[sync]--;
 	rl->count[sync]--;
 	if (flags & REQ_ELVPRIV)
-		rl->elvpriv--;
+		q->nr_rqs_elvpriv--;
 
 	__freed_request(q, sync);
 
@@ -902,6 +902,7 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
 	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
 		return NULL;
 
+	q->nr_rqs[is_sync]++;
 	rl->count[is_sync]++;
 	rl->starved[is_sync] = 0;
 
@@ -917,7 +918,7 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
 	 */
 	if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
 		rw_flags |= REQ_ELVPRIV;
-		rl->elvpriv++;
+		q->nr_rqs_elvpriv++;
 		if (et->icq_cache && ioc)
 			icq = ioc_lookup_icq(ioc, q);
 	}
@@ -978,7 +979,7 @@ fail_elvpriv:
 	rq->elv.icq = NULL;
 
 	spin_lock_irq(q->queue_lock);
-	rl->elvpriv--;
+	q->nr_rqs_elvpriv--;
 	spin_unlock_irq(q->queue_lock);
 	goto out;
 