@@ -525,9 +525,12 @@ static int __noflush_suspending(struct mapped_device *md)
 static void dec_pending(struct dm_io *io, int error)
 {
 	unsigned long flags;
+	int io_error;
+	struct bio *bio;
+	struct mapped_device *md = io->md;
 
 	/* Push-back supersedes any I/O errors */
-	if (error && !(io->error > 0 && __noflush_suspending(io->md)))
+	if (error && !(io->error > 0 && __noflush_suspending(md)))
 		io->error = error;
 
 	if (atomic_dec_and_test(&io->io_count)) {
@@ -537,24 +540,27 @@ static void dec_pending(struct dm_io *io, int error)
 			 * This must be handled before the sleeper on
 			 * suspend queue merges the pushback list.
 			 */
-			spin_lock_irqsave(&io->md->pushback_lock, flags);
-			if (__noflush_suspending(io->md))
-				bio_list_add(&io->md->pushback, io->bio);
+			spin_lock_irqsave(&md->pushback_lock, flags);
+			if (__noflush_suspending(md))
+				bio_list_add(&md->pushback, io->bio);
 			else
 				/* noflush suspend was interrupted. */
 				io->error = -EIO;
-			spin_unlock_irqrestore(&io->md->pushback_lock, flags);
+			spin_unlock_irqrestore(&md->pushback_lock, flags);
 		}
 
 		end_io_acct(io);
 
-		if (io->error != DM_ENDIO_REQUEUE) {
-			trace_block_bio_complete(io->md->queue, io->bio);
+		io_error = io->error;
+		bio = io->bio;
 
-			bio_endio(io->bio, io->error);
-		}
+		free_io(md, io);
+
+		if (io_error != DM_ENDIO_REQUEUE) {
+			trace_block_bio_complete(md->queue, bio);
 
-		free_io(io->md, io);
+			bio_endio(bio, io_error);
+		}
 	}
 }
 
@@ -562,6 +568,7 @@ static void clone_endio(struct bio *bio, int error)
 {
 	int r = 0;
 	struct dm_target_io *tio = bio->bi_private;
+	struct dm_io *io = tio->io;
 	struct mapped_device *md = tio->io->md;
 	dm_endio_fn endio = tio->ti->type->end_io;
 
@@ -585,15 +592,14 @@ static void clone_endio(struct bio *bio, int error)
 		}
 	}
 
-	dec_pending(tio->io, error);
-
 	/*
 	 * Store md for cleanup instead of tio which is about to get freed.
 	 */
 	bio->bi_private = md->bs;
 
-	bio_put(bio);
 	free_tio(md, tio);
+	bio_put(bio);
+	dec_pending(io, error);
 }
 
 static sector_t max_io_len(struct mapped_device *md,