@@ -64,13 +64,15 @@ static void drive_stat_acct(struct request *rq, int new_io)
 		return;
 
 	cpu = part_stat_lock();
-	part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
 
-	if (!new_io)
+	if (!new_io) {
+		part = rq->part;
 		part_stat_inc(cpu, part, merges[rw]);
-	else {
+	} else {
+		part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
 		part_round_stats(cpu, part);
 		part_inc_in_flight(part, rw);
+		rq->part = part;
 	}
 
 	part_stat_unlock();
@@ -128,6 +130,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 	rq->ref_count = 1;
 	rq->start_time = jiffies;
 	set_start_time_ns(rq);
+	rq->part = NULL;
 }
 EXPORT_SYMBOL(blk_rq_init);
 
@@ -804,11 +807,16 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	rl->starved[is_sync] = 0;
 
 	priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
-	if (priv)
+	if (priv) {
 		rl->elvpriv++;
 
-	if (blk_queue_io_stat(q))
-		rw_flags |= REQ_IO_STAT;
+		/*
+		 * Don't do stats for non-priv requests
+		 */
+		if (blk_queue_io_stat(q))
+			rw_flags |= REQ_IO_STAT;
+	}
+
 	spin_unlock_irq(q->queue_lock);
 
 	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
@@ -1777,7 +1785,7 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
 		int cpu;
 
 		cpu = part_stat_lock();
-		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
+		part = req->part;
 		part_stat_add(cpu, part, sectors[rw], bytes >> 9);
 		part_stat_unlock();
 	}
@@ -1797,7 +1805,7 @@ static void blk_account_io_done(struct request *req)
 		int cpu;
 
 		cpu = part_stat_lock();
-		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
+		part = req->part;
 
 		part_stat_inc(cpu, part, ios[rw]);
 		part_stat_add(cpu, part, ticks[rw], duration);
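
Below is a minimal user-space sketch of the accounting pattern the hunks above implement: the partition is mapped from the sector once, when a new I/O is started, and cached on the request, so later merge (and completion) accounting reuses the cached pointer even if merging moved the request's start sector across a partition boundary. The types and helpers here (struct part_stats, struct mock_request, lookup_part_by_sector) are hypothetical stand-ins for hd_struct, struct request, and disk_map_sector_rcu(); they are not kernel APIs, just a simplified model of the idea.

/*
 * Simplified, user-space model of "look up the partition once, cache it
 * on the request, reuse it for merge accounting".
 */
#include <stdio.h>

struct part_stats {
	const char *name;
	unsigned long ios;
	unsigned long merges;
};

struct mock_request {
	unsigned long long sector;
	struct part_stats *part;	/* cached at first submission */
};

static struct part_stats parts[2] = { { "sda1" }, { "sda2" } };

/* Pretend partition 0 covers sectors [0, 1000), partition 1 the rest. */
static struct part_stats *lookup_part_by_sector(unsigned long long sector)
{
	return sector < 1000 ? &parts[0] : &parts[1];
}

static void stat_acct(struct mock_request *rq, int new_io)
{
	struct part_stats *part;

	if (!new_io) {
		part = rq->part;		/* reuse the cached partition */
		part->merges++;
	} else {
		part = lookup_part_by_sector(rq->sector);
		part->ios++;			/* charge the new I/O to the mapped partition */
		rq->part = part;		/* cache it for later accounting */
	}
}

int main(void)
{
	struct mock_request rq = { .sector = 42, .part = NULL };

	stat_acct(&rq, 1);	/* new I/O: map the sector, cache the partition */
	rq.sector = 1500;	/* a merge may move the request's start sector */
	stat_acct(&rq, 0);	/* merge: still charged to the cached partition */

	printf("%s: ios=%lu merges=%lu\n", parts[0].name, parts[0].ios, parts[0].merges);
	printf("%s: ios=%lu merges=%lu\n", parts[1].name, parts[1].ios, parts[1].merges);
	return 0;
}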