@@ -31,6 +31,7 @@ struct dm_io {
 	int error;
 	struct bio *bio;
 	atomic_t io_count;
+	unsigned long start_time;
 };
 
 /*
@@ -244,6 +245,36 @@ static inline void free_tio(struct mapped_device *md, struct target_io *tio)
 	mempool_free(tio, md->tio_pool);
 }
 
+static void start_io_acct(struct dm_io *io)
+{
+	struct mapped_device *md = io->md;
+
+	io->start_time = jiffies;
+
+	preempt_disable();
+	disk_round_stats(dm_disk(md));
+	preempt_enable();
+	dm_disk(md)->in_flight = atomic_inc_return(&md->pending);
+}
+
+static int end_io_acct(struct dm_io *io)
+{
+	struct mapped_device *md = io->md;
+	struct bio *bio = io->bio;
+	unsigned long duration = jiffies - io->start_time;
+	int pending;
+	int rw = bio_data_dir(bio);
+
+	preempt_disable();
+	disk_round_stats(dm_disk(md));
+	preempt_enable();
+	dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending);
+
+	disk_stat_add(dm_disk(md), ticks[rw], duration);
+
+	return !pending;
+}
+
 /*
  * Add the bio to the list of deferred io.
  */
@@ -299,7 +330,7 @@ static void dec_pending(struct dm_io *io, int error)
 		io->error = error;
 
 	if (atomic_dec_and_test(&io->io_count)) {
-		if (atomic_dec_and_test(&io->md->pending))
+		if (end_io_acct(io))
 			/* nudge anyone waiting on suspend queue */
 			wake_up(&io->md->wait);
 
@@ -554,7 +585,7 @@ static void __split_bio(struct mapped_device *md, struct bio *bio)
 	ci.sector_count = bio_sectors(bio);
 	ci.idx = bio->bi_idx;
 
-	atomic_inc(&md->pending);
+	start_io_acct(ci.io);
 	while (ci.sector_count)
 		__clone_and_map(&ci);
 
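
For illustration only, not part of the patch: a minimal userspace sketch of the accounting
pattern the two helpers implement, assuming stand-in names (fake_md, fake_io, start_acct,
end_acct) and plain counters in place of jiffies, preempt_disable()/disk_round_stats() and
the gendisk statistics. It shows each dm_io being charged once at submission and once at
completion, with the last completion reporting pending == 0 so dec_pending() can wake
anyone waiting on the suspend queue (md->wait).

/* Userspace model of start_io_acct()/end_io_acct(); hypothetical names throughout. */
#include <stdio.h>
#include <time.h>

struct fake_md {
	int pending;		/* stands in for md->pending (atomic_t)        */
	long ticks[2];		/* stands in for disk_stat ticks[read/write]   */
	int in_flight;		/* stands in for dm_disk(md)->in_flight        */
};

struct fake_io {
	struct fake_md *md;
	time_t start_time;	/* stands in for io->start_time (jiffies)      */
	int rw;			/* 0 = read, 1 = write                         */
};

static void start_acct(struct fake_io *io)
{
	io->start_time = time(NULL);
	/* mirror "in_flight = atomic_inc_return(&md->pending)" */
	io->md->in_flight = ++io->md->pending;
}

/* Returns non-zero when this completion was the last one in flight,
 * mirroring end_io_acct()'s "return !pending". */
static int end_acct(struct fake_io *io)
{
	long duration = (long)(time(NULL) - io->start_time);
	int pending;

	io->md->in_flight = pending = --io->md->pending;
	io->md->ticks[io->rw] += duration;	/* mirror disk_stat_add(..., ticks[rw], duration) */
	return !pending;
}

int main(void)
{
	struct fake_md md = { 0, { 0, 0 }, 0 };
	struct fake_io a = { &md, 0, 0 }, b = { &md, 0, 1 };

	start_acct(&a);
	start_acct(&b);
	printf("in_flight after submit: %d\n", md.in_flight);	/* 2 */
	printf("last? %d\n", end_acct(&a));			/* 0 */
	printf("last? %d\n", end_acct(&b));			/* 1: would wake the suspend waiter */
	return 0;
}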