@@ -67,6 +67,7 @@ static int max_queued_requests = 1024;
 
 static void allow_barrier(struct r10conf *conf);
 static void lower_barrier(struct r10conf *conf);
+static int enough(struct r10conf *conf, int ignore);
 
 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
 {
@@ -347,6 +348,19 @@ static void raid10_end_read_request(struct bio *bio, int error)
 		 * wait for the 'master' bio.
 		 */
 		set_bit(R10BIO_Uptodate, &r10_bio->state);
+	} else {
+		/* If all other devices that store this block have
+		 * failed, we want to return the error upwards rather
+		 * than fail the last device.  Here we redefine
+		 * "uptodate" to mean "Don't want to retry"
+		 */
+		unsigned long flags;
+		spin_lock_irqsave(&conf->device_lock, flags);
+		if (!enough(conf, rdev->raid_disk))
+			uptodate = 1;
+		spin_unlock_irqrestore(&conf->device_lock, flags);
+	}
+	if (uptodate) {
 		raid_end_bio_io(r10_bio);
 		rdev_dec_pending(rdev, conf->mddev);
 	} else {
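
A minimal userspace sketch of the retry decision this hunk introduces, assuming only the shape of the logic: a failed read is retried on another mirror only while enough() reports that a working copy survives elsewhere. Every identifier below is an illustrative stand-in, not the kernel's.

/* sketch.c: build with `cc -o sketch sketch.c` */
#include <stdbool.h>
#include <stdio.h>

struct conf_sketch {
	int copies_alive;		/* working devices holding this block */
};

/* Stand-in for enough(conf, ignore): true when the array still has a
 * readable copy even if device "ignore" is dropped. */
static bool enough_sketch(const struct conf_sketch *conf, int ignore)
{
	(void)ignore;			/* the real code walks the geometry */
	return conf->copies_alive > 1;
}

static void end_read_sketch(struct conf_sketch *conf, bool read_ok, int disk)
{
	bool uptodate = read_ok;

	/* Redefine "uptodate" as "don't want to retry" when this was
	 * the last device able to serve the block. */
	if (!uptodate && !enough_sketch(conf, disk))
		uptodate = true;

	if (uptodate)
		puts("complete the bio, passing the read's own status up");
	else
		printf("fail device %d and retry on another mirror\n", disk);
}

int main(void)
{
	struct conf_sketch redundant = { .copies_alive = 2 };
	struct conf_sketch last_copy = { .copies_alive = 1 };

	end_read_sketch(&redundant, false, 0);	/* retry path */
	end_read_sketch(&last_copy, false, 0);	/* error goes upward */
	return 0;
}
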
@@ -2052,6 +2066,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
 			       "md/raid10:%s: %s: Failing raid device\n",
 			       mdname(mddev), b);
 			md_error(mddev, conf->mirrors[d].rdev);
+			r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
 			return;
 		}
 
@@ -2105,8 +2120,11 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
 					     rdev,
 					     r10_bio->devs[r10_bio->read_slot].addr
 					     + sect,
-					     s, 0))
+					     s, 0)) {
 					md_error(mddev, rdev);
+					r10_bio->devs[r10_bio->read_slot].bio
+						= IO_BLOCKED;
+				}
 				break;
 			}
 
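
Both fix_read_error() hunks mark the failing slot with IO_BLOCKED, which raid10.h defines as ((struct bio *)1): a non-NULL sentinel stored in the slot's bio pointer so that the retry path skips this device instead of re-reading from it. A short sketch of the sentinel pattern, using simplified stand-in types:

/* sentinel.c: compare, never dereference, the marker value */
#include <stdio.h>

struct bio_sketch { int payload; };

#define IO_BLOCKED ((struct bio_sketch *)1)

struct slot_sketch {
	struct bio_sketch *bio;	/* NULL, IO_BLOCKED, or a live bio */
};

static int slot_usable(const struct slot_sketch *s)
{
	/* A slot holding IO_BLOCKED must not be chosen for a retry. */
	return s->bio != IO_BLOCKED;
}

int main(void)
{
	struct slot_sketch free_slot = { .bio = NULL };
	struct slot_sketch blocked = { .bio = IO_BLOCKED };

	printf("free slot usable: %d\n", slot_usable(&free_slot));
	printf("blocked slot usable: %d\n", slot_usable(&blocked));
	return 0;
}
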
@@ -2299,17 +2317,20 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
 	 * This is all done synchronously while the array is
 	 * frozen.
 	 */
+	bio = r10_bio->devs[slot].bio;
+	bdevname(bio->bi_bdev, b);
+	bio_put(bio);
+	r10_bio->devs[slot].bio = NULL;
+
 	if (mddev->ro == 0) {
 		freeze_array(conf);
 		fix_read_error(conf, mddev, r10_bio);
 		unfreeze_array(conf);
-	}
+	} else
+		r10_bio->devs[slot].bio = IO_BLOCKED;
+
 	rdev_dec_pending(rdev, mddev);
 
-	bio = r10_bio->devs[slot].bio;
-	bdevname(bio->bi_bdev, b);
-	r10_bio->devs[slot].bio =
-		mddev->ro ? IO_BLOCKED : NULL;
 read_more:
 	rdev = read_balance(conf, r10_bio, &max_sectors);
 	if (rdev == NULL) {
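
This hunk also reorders the failed bio's teardown: the device name is captured and the reference dropped exactly once, before the retry logic runs, and the slot is cleared to NULL (or marked IO_BLOCKED on a read-only array) up front. Because the slot is no longer rewritten after fix_read_error() returns, an IO_BLOCKED mark set inside fix_read_error() now survives. A hedged sketch of that capture-then-release shape, with hypothetical names:

/* teardown.c: capture what you still need, release once, then branch */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct bio_sketch { char devname[16]; };

static void bio_put_sketch(struct bio_sketch *bio)
{
	free(bio);		/* one release on every path */
}

static void handle_read_error_sketch(struct bio_sketch **slot, int read_only)
{
	char b[16];

	strcpy(b, (*slot)->devname);	/* capture before releasing */
	bio_put_sketch(*slot);
	*slot = NULL;			/* later code may re-mark the slot */

	if (!read_only)
		puts("fix_read_error() may set the slot to a blocked mark");
	else
		puts("read-only array: mark the slot blocked directly");

	printf("retrying the read that failed on %s\n", b);
}

int main(void)
{
	struct bio_sketch *bio = malloc(sizeof(*bio));

	strcpy(bio->devname, "sdb");
	handle_read_error_sketch(&bio, 0);
	return 0;
}
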
@@ -2318,13 +2339,10 @@ read_more:
 		       mdname(mddev), b,
 		       (unsigned long long)r10_bio->sector);
 		raid_end_bio_io(r10_bio);
-		bio_put(bio);
 		return;
 	}
 
 	do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
-	if (bio)
-		bio_put(bio);
 	slot = r10_bio->read_slot;
 	printk_ratelimited(
 		KERN_ERR
@@ -2360,7 +2378,6 @@ read_more:
 			mbio->bi_phys_segments++;
 		spin_unlock_irq(&conf->device_lock);
 		generic_make_request(bio);
-		bio = NULL;
 
 		r10_bio = mempool_alloc(conf->r10bio_pool,
 					GFP_NOIO);