@@ -1334,18 +1334,21 @@ retry_write:
 			blocked_rdev = rrdev;
 			break;
 		}
+		if (rdev && (test_bit(Faulty, &rdev->flags)
+			     || test_bit(Unmerged, &rdev->flags)))
+			rdev = NULL;
 		if (rrdev && (test_bit(Faulty, &rrdev->flags)
 			      || test_bit(Unmerged, &rrdev->flags)))
 			rrdev = NULL;
 
 		r10_bio->devs[i].bio = NULL;
 		r10_bio->devs[i].repl_bio = NULL;
-		if (!rdev || test_bit(Faulty, &rdev->flags) ||
-		    test_bit(Unmerged, &rdev->flags)) {
+
+		if (!rdev && !rrdev) {
 			set_bit(R10BIO_Degraded, &r10_bio->state);
 			continue;
 		}
-		if (test_bit(WriteErrorSeen, &rdev->flags)) {
+		if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
 			sector_t first_bad;
 			sector_t dev_sector = r10_bio->devs[i].addr;
 			int bad_sectors;
@@ -1387,8 +1390,10 @@ retry_write:
 				max_sectors = good_sectors;
 			}
 		}
-		r10_bio->devs[i].bio = bio;
-		atomic_inc(&rdev->nr_pending);
+		if (rdev) {
+			r10_bio->devs[i].bio = bio;
+			atomic_inc(&rdev->nr_pending);
+		}
 		if (rrdev) {
 			r10_bio->devs[i].repl_bio = bio;
 			atomic_inc(&rrdev->nr_pending);
@@ -1444,69 +1449,71 @@ retry_write:
 	for (i = 0; i < conf->copies; i++) {
 		struct bio *mbio;
 		int d = r10_bio->devs[i].devnum;
-		if (!r10_bio->devs[i].bio)
-			continue;
+		if (r10_bio->devs[i].bio) {
+			struct md_rdev *rdev = conf->mirrors[d].rdev;
+			mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+			md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
+				    max_sectors);
+			r10_bio->devs[i].bio = mbio;
+
+			mbio->bi_sector	= (r10_bio->devs[i].addr+
+					   choose_data_offset(r10_bio,
+							      rdev));
+			mbio->bi_bdev = rdev->bdev;
+			mbio->bi_end_io	= raid10_end_write_request;
+			mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+			mbio->bi_private = r10_bio;
 
-		mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-		md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
-			    max_sectors);
-		r10_bio->devs[i].bio = mbio;
+			atomic_inc(&r10_bio->remaining);
 
-		mbio->bi_sector	= (r10_bio->devs[i].addr+
-				   choose_data_offset(r10_bio,
-						      conf->mirrors[d].rdev));
-		mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
-		mbio->bi_end_io	= raid10_end_write_request;
-		mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
-		mbio->bi_private = r10_bio;
+			cb = blk_check_plugged(raid10_unplug, mddev,
+					       sizeof(*plug));
+			if (cb)
+				plug = container_of(cb, struct raid10_plug_cb,
+						    cb);
+			else
+				plug = NULL;
+			spin_lock_irqsave(&conf->device_lock, flags);
+			if (plug) {
+				bio_list_add(&plug->pending, mbio);
+				plug->pending_cnt++;
+			} else {
+				bio_list_add(&conf->pending_bio_list, mbio);
+				conf->pending_count++;
+			}
+			spin_unlock_irqrestore(&conf->device_lock, flags);
+			if (!plug)
+				md_wakeup_thread(mddev->thread);
+		}
 
-		atomic_inc(&r10_bio->remaining);
+		if (r10_bio->devs[i].repl_bio) {
+			struct md_rdev *rdev = conf->mirrors[d].replacement;
+			if (rdev == NULL) {
+				/* Replacement just got moved to main 'rdev' */
+				smp_mb();
+				rdev = conf->mirrors[d].rdev;
+			}
+			mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+			md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
+				    max_sectors);
+			r10_bio->devs[i].repl_bio = mbio;
+
+			mbio->bi_sector	= (r10_bio->devs[i].addr +
+					   choose_data_offset(
+						   r10_bio, rdev));
+			mbio->bi_bdev = rdev->bdev;
+			mbio->bi_end_io	= raid10_end_write_request;
+			mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+			mbio->bi_private = r10_bio;
 
-		cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
-		if (cb)
-			plug = container_of(cb, struct raid10_plug_cb, cb);
-		else
-			plug = NULL;
-		spin_lock_irqsave(&conf->device_lock, flags);
-		if (plug) {
-			bio_list_add(&plug->pending, mbio);
-			plug->pending_cnt++;
-		} else {
+			atomic_inc(&r10_bio->remaining);
+			spin_lock_irqsave(&conf->device_lock, flags);
 			bio_list_add(&conf->pending_bio_list, mbio);
 			conf->pending_count++;
+			spin_unlock_irqrestore(&conf->device_lock, flags);
+			if (!mddev_check_plugged(mddev))
+				md_wakeup_thread(mddev->thread);
 		}
-		spin_unlock_irqrestore(&conf->device_lock, flags);
-		if (!plug)
-			md_wakeup_thread(mddev->thread);
-
-		if (!r10_bio->devs[i].repl_bio)
-			continue;
-
-		mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-		md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
-			    max_sectors);
-		r10_bio->devs[i].repl_bio = mbio;
-
-		/* We are actively writing to the original device
-		 * so it cannot disappear, so the replacement cannot
-		 * become NULL here
-		 */
-		mbio->bi_sector	= (r10_bio->devs[i].addr +
-				   choose_data_offset(
-					   r10_bio,
-					   conf->mirrors[d].replacement));
-		mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
-		mbio->bi_end_io	= raid10_end_write_request;
-		mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
-		mbio->bi_private = r10_bio;
-
-		atomic_inc(&r10_bio->remaining);
-		spin_lock_irqsave(&conf->device_lock, flags);
-		bio_list_add(&conf->pending_bio_list, mbio);
-		conf->pending_count++;
-		spin_unlock_irqrestore(&conf->device_lock, flags);
-		if (!mddev_check_plugged(mddev))
-			md_wakeup_thread(mddev->thread);
 	}
 
 	/* Don't remove the bias on 'remaining' (one_write_done) until
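
For readers following the control flow rather than the md internals, the following is a minimal userspace sketch of the decision logic the hunks introduce. The type and function names (struct dev, usable, dispatch_write) are hypothetical stand-ins, not the kernel API: the point illustrated is that a write slot is now marked degraded only when both the primary device and its replacement are unusable, that a Faulty/Unmerged device is treated exactly like a missing one, and that the write is issued independently to whichever of the two devices remains.

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for struct md_rdev: a name and a fault flag. */
	struct dev {
		const char *name;
		bool faulty;
	};

	/* Mirrors the new Faulty/Unmerged checks that NULL out rdev and
	 * rrdev: a faulty device is treated exactly like a missing one. */
	static struct dev *usable(struct dev *d)
	{
		return (d && !d->faulty) ? d : NULL;
	}

	/* Returns false when the slot must be degraded, the analogue of
	 * "if (!rdev && !rrdev) { set_bit(R10BIO_Degraded, ...); continue; }".
	 * Otherwise the write goes to each device that is still present,
	 * mirroring the now-separate "if (rdev)" and "if (rrdev)" branches. */
	static bool dispatch_write(struct dev *rdev, struct dev *rrdev)
	{
		rdev = usable(rdev);
		rrdev = usable(rrdev);
		if (!rdev && !rrdev)
			return false;
		if (rdev)
			printf("write -> primary %s\n", rdev->name);
		if (rrdev)
			printf("write -> replacement %s\n", rrdev->name);
		return true;
	}

	int main(void)
	{
		struct dev primary = { "sda", true };      /* failed primary */
		struct dev replacement = { "sdb", false }; /* healthy replacement */

		/* Before the patch this slot was abandoned as soon as the
		 * primary was faulty; now the replacement still gets the write. */
		if (!dispatch_write(&primary, &replacement))
			puts("slot degraded");
		return 0;
	}

The sketch deliberately omits one subtlety the real patch handles: the removed comment assumed the replacement could never become NULL while the original was being written, an assumption that no longer holds once writes may proceed with no primary at all. The new smp_mb() covers the window where a finished replacement has just been promoted into the main 'rdev' slot (per the patch's own comment), after which the code re-reads conf->mirrors[d].rdev as the write target.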