@@ -47,6 +47,7 @@ struct dm_io {
 	atomic_t io_count;
 	struct bio *bio;
 	unsigned long start_time;
+	spinlock_t endio_lock;
 };
 
 /*
@@ -578,8 +579,12 @@ static void dec_pending(struct dm_io *io, int error)
 	struct mapped_device *md = io->md;
 
 	/* Push-back supersedes any I/O errors */
-	if (error && !(io->error > 0 && __noflush_suspending(md)))
-		io->error = error;
+	if (unlikely(error)) {
+		spin_lock_irqsave(&io->endio_lock, flags);
+		if (!(io->error > 0 && __noflush_suspending(md)))
+			io->error = error;
+		spin_unlock_irqrestore(&io->endio_lock, flags);
+	}
 
 	if (atomic_dec_and_test(&io->io_count)) {
 		if (io->error == DM_ENDIO_REQUEUE) {
@@ -1226,6 +1231,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
 	atomic_set(&ci.io->io_count, 1);
 	ci.io->bio = bio;
 	ci.io->md = md;
+	spin_lock_init(&ci.io->endio_lock);
 	ci.sector = bio->bi_sector;
 	ci.sector_count = bio_sectors(bio);
 	if (unlikely(bio_empty_barrier(bio)))
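
For context, here is a minimal user-space sketch of the pattern the dec_pending() hunk applies: several completion paths can race to record an error for one shared io, so the error field is updated under a per-io lock while the reference count itself stays atomic. This is an illustrative analogue only, not code from the patch: a pthread mutex stands in for the kernel spinlock, the names fake_io and fake_dec_pending are invented, and the noflush/DM_ENDIO_REQUEUE special case is simplified to "keep the first reported error".

/* Illustrative analogue of the locking pattern above (not kernel code). */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct fake_io {
	atomic_int io_count;        /* analogue of io->io_count */
	int error;                  /* analogue of io->error */
	pthread_mutex_t endio_lock; /* analogue of io->endio_lock */
};

/* Analogue of dec_pending(): record an error, then drop one reference. */
static void fake_dec_pending(struct fake_io *io, int error)
{
	if (error) {
		/* The read-modify-write of io->error must be serialized,
		 * because completions can run concurrently. */
		pthread_mutex_lock(&io->endio_lock);
		if (io->error == 0)      /* simplified: keep the first error */
			io->error = error;
		pthread_mutex_unlock(&io->endio_lock);
	}

	/* Last reference dropped: the whole io is complete. */
	if (atomic_fetch_sub(&io->io_count, 1) == 1)
		printf("io complete, error=%d\n", io->error);
}

int main(void)
{
	struct fake_io io = { .error = 0 };

	/* Analogue of __split_and_process_bio(): one reference per clone,
	 * and the lock is initialized before any completion can run. */
	atomic_init(&io.io_count, 2);
	pthread_mutex_init(&io.endio_lock, NULL);

	fake_dec_pending(&io, 0);
	fake_dec_pending(&io, -5 /* like -EIO */);

	pthread_mutex_destroy(&io.endio_lock);
	return 0;
}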