@@ -93,11 +93,11 @@ static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
 				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
 					md_wakeup_thread(conf->mddev->thread);
 			}
-			list_add_tail(&sh->lru, &conf->inactive_list);
 			atomic_dec(&conf->active_stripes);
-			if (!conf->inactive_blocked ||
-			    atomic_read(&conf->active_stripes) < (conf->max_nr_stripes*3/4))
+			if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
+				list_add_tail(&sh->lru, &conf->inactive_list);
 				wake_up(&conf->wait_for_stripe);
+			}
 		}
 	}
 }
@@ -273,9 +273,8 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector
 			} else {
 				if (!test_bit(STRIPE_HANDLE, &sh->state))
 					atomic_inc(&conf->active_stripes);
-				if (list_empty(&sh->lru))
-					BUG();
-				list_del_init(&sh->lru);
+				if (!list_empty(&sh->lru))
+					list_del_init(&sh->lru);
 			}
 		}
 	} while (sh == NULL);
@@ -1035,6 +1034,18 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 	return 0;
 }
 
+static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks)
+{
+	int sectors_per_chunk = conf->chunk_size >> 9;
+	sector_t x = stripe;
+	int pd_idx, dd_idx;
+	int chunk_offset = sector_div(x, sectors_per_chunk);
+	stripe = x;
+	raid5_compute_sector(stripe*(disks-1)*sectors_per_chunk
+			     + chunk_offset, disks, disks-1, &dd_idx, &pd_idx, conf);
+	return pd_idx;
+}
+
 
 /*
  * handle_stripe - do things to a stripe.
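
The helper above simply round-trips through raid5_compute_sector() (not shown in this hunk), so whatever parity layout the array is configured with is honoured. As a rough illustration only, assuming the default left-symmetric layout where parity sits on the last disk for chunk row 0 and rotates back one disk per row, the mapping it produces boils down to a one-line calculation; the user-space names below are invented for this sketch and are not part of the patch:

#include <stdio.h>

/* Illustrative model, not the kernel code: 'dev_sector' plays the role of
 * sh->sector (a sector offset within each member device) and
 * 'sectors_per_chunk' is chunk_size >> 9.  For the left-symmetric layout,
 * parity lives on disk (disks-1) for chunk row 0 and moves one disk lower
 * for each following row, wrapping around.
 */
static int model_stripe_to_pdidx(unsigned long long dev_sector,
				 int sectors_per_chunk, int disks)
{
	unsigned long long chunk_row = dev_sector / sectors_per_chunk;

	return (disks - 1) - (int)(chunk_row % disks);
}

int main(void)
{
	int row;

	/* example parameters: 4-disk array, 64KiB chunks (128 sectors) */
	for (row = 0; row < 5; row++)
		printf("chunk row %d -> parity on disk %d\n", row,
		       model_stripe_to_pdidx((unsigned long long)row * 128,
					     128, 4));
	return 0;
}

For a 4-disk array this prints parity on disks 3, 2, 1, 0, 3, ... which is the rotation the reshape code relies on when it recomputes sh->pd_idx for the new disk count.
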
@@ -1061,7 +1072,7 @@ static void handle_stripe(struct stripe_head *sh)
 	struct bio *return_bi= NULL;
 	struct bio *bi;
 	int i;
-	int syncing;
+	int syncing, expanding, expanded;
 	int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
 	int non_overwrite = 0;
 	int failed_num=0;
@@ -1076,6 +1087,8 @@ static void handle_stripe(struct stripe_head *sh)
 	clear_bit(STRIPE_DELAYED, &sh->state);
 
 	syncing = test_bit(STRIPE_SYNCING, &sh->state);
+	expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
+	expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
 	/* Now to look around and see what can be done */
 
 	rcu_read_lock();
@@ -1268,13 +1281,14 @@ static void handle_stripe(struct stripe_head *sh)
|
|
|
* parity, or to satisfy requests
|
|
|
* or to load a block that is being partially written.
|
|
|
*/
|
|
|
- if (to_read || non_overwrite || (syncing && (uptodate < disks))) {
|
|
|
+ if (to_read || non_overwrite || (syncing && (uptodate < disks)) || expanding) {
|
|
|
for (i=disks; i--;) {
|
|
|
dev = &sh->dev[i];
|
|
|
if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
|
|
|
(dev->toread ||
|
|
|
(dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
|
|
|
syncing ||
|
|
|
+ expanding ||
|
|
|
(failed && (sh->dev[failed_num].toread ||
|
|
|
(sh->dev[failed_num].towrite && !test_bit(R5_OVERWRITE, &sh->dev[failed_num].flags))))
|
|
|
)
|
|
@@ -1464,13 +1478,76 @@ static void handle_stripe(struct stripe_head *sh)
|
|
|
set_bit(R5_Wantwrite, &dev->flags);
|
|
|
set_bit(R5_ReWrite, &dev->flags);
|
|
|
set_bit(R5_LOCKED, &dev->flags);
|
|
|
+ locked++;
|
|
|
} else {
|
|
|
/* let's read it back */
|
|
|
set_bit(R5_Wantread, &dev->flags);
|
|
|
set_bit(R5_LOCKED, &dev->flags);
|
|
|
+ locked++;
|
|
|
}
|
|
|
}
|
|
|
|
|
|
+ if (expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
|
|
|
+ /* Need to write out all blocks after computing parity */
|
|
|
+ sh->disks = conf->raid_disks;
|
|
|
+ sh->pd_idx = stripe_to_pdidx(sh->sector, conf, conf->raid_disks);
|
|
|
+ compute_parity(sh, RECONSTRUCT_WRITE);
|
|
|
+ for (i= conf->raid_disks; i--;) {
|
|
|
+ set_bit(R5_LOCKED, &sh->dev[i].flags);
|
|
|
+ locked++;
|
|
|
+ set_bit(R5_Wantwrite, &sh->dev[i].flags);
|
|
|
+ }
|
|
|
+ clear_bit(STRIPE_EXPANDING, &sh->state);
|
|
|
+ } else if (expanded) {
|
|
|
+ clear_bit(STRIPE_EXPAND_READY, &sh->state);
|
|
|
+ wake_up(&conf->wait_for_overlap);
|
|
|
+ md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
|
|
|
+ }
|
|
|
+
|
|
|
+ if (expanding && locked == 0) {
|
|
|
+ /* We have read all the blocks in this stripe and now we need to
|
|
|
+ * copy some of them into a target stripe for expand.
|
|
|
+ */
|
|
|
+ clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
|
|
|
+ for (i=0; i< sh->disks; i++)
|
|
|
+ if (i != sh->pd_idx) {
|
|
|
+ int dd_idx, pd_idx, j;
|
|
|
+ struct stripe_head *sh2;
|
|
|
+
|
|
|
+ sector_t bn = compute_blocknr(sh, i);
|
|
|
+ sector_t s = raid5_compute_sector(bn, conf->raid_disks,
|
|
|
+ conf->raid_disks-1,
|
|
|
+ &dd_idx, &pd_idx, conf);
|
|
|
+ sh2 = get_active_stripe(conf, s, conf->raid_disks, pd_idx, 1);
|
|
|
+ if (sh2 == NULL)
|
|
|
+ /* so far only the early blocks of this stripe
|
|
|
+ * have been requested. When later blocks
|
|
|
+ * get requested, we will try again
|
|
|
+ */
|
|
|
+ continue;
|
|
|
+ if(!test_bit(STRIPE_EXPANDING, &sh2->state) ||
|
|
|
+ test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
|
|
|
+ /* must have already done this block */
|
|
|
+ release_stripe(sh2);
|
|
|
+ continue;
|
|
|
+ }
|
|
|
+ memcpy(page_address(sh2->dev[dd_idx].page),
|
|
|
+ page_address(sh->dev[i].page),
|
|
|
+ STRIPE_SIZE);
|
|
|
+ set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
|
|
|
+ set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
|
|
|
+ for (j=0; j<conf->raid_disks; j++)
|
|
|
+ if (j != sh2->pd_idx &&
|
|
|
+ !test_bit(R5_Expanded, &sh2->dev[j].flags))
|
|
|
+ break;
|
|
|
+ if (j == conf->raid_disks) {
|
|
|
+ set_bit(STRIPE_EXPAND_READY, &sh2->state);
|
|
|
+ set_bit(STRIPE_HANDLE, &sh2->state);
|
|
|
+ }
|
|
|
+ release_stripe(sh2);
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
spin_unlock(&sh->lock);
|
|
|
|
|
|
while ((bi=return_bi)) {
|
|
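
A note on the tail of the copy path above: a destination stripe is only promoted to STRIPE_EXPAND_READY once every data slot (every device except the parity slot) has received its block and carries R5_Expanded. As a sketch only, with a plain bool array standing in for the per-device flag bits, that completeness test amounts to the following; the function and parameter names are invented for the illustration:

#include <stdbool.h>

/* Illustrative restatement of the check run after each copy:
 * 'expanded[j]' stands in for test_bit(R5_Expanded, &sh2->dev[j].flags).
 * Only when this returns true does the patch set STRIPE_EXPAND_READY and
 * STRIPE_HANDLE, so the destination stripe then gets parity computed and
 * all of its blocks written out on a later handle_stripe() pass.
 */
static bool destination_stripe_complete(const bool *expanded,
					int raid_disks, int pd_idx)
{
	int j;

	for (j = 0; j < raid_disks; j++)
		if (j != pd_idx && !expanded[j])
			return false;	/* some data block not copied yet */
	return true;
}
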
@@ -1509,7 +1586,7 @@ static void handle_stripe(struct stripe_head *sh)
 		rcu_read_unlock();
 
 		if (rdev) {
-			if (syncing)
+			if (syncing || expanding || expanded)
 				md_sync_acct(rdev->bdev, STRIPE_SECTORS);
 
 			bi->bi_bdev = rdev->bdev;
@@ -1757,12 +1834,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 {
 	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
 	struct stripe_head *sh;
-	int sectors_per_chunk = conf->chunk_size >> 9;
-	sector_t x;
-	unsigned long stripe;
-	int chunk_offset;
-	int dd_idx, pd_idx;
-	sector_t first_sector;
+	int pd_idx;
+	sector_t first_sector, last_sector;
 	int raid_disks = conf->raid_disks;
 	int data_disks = raid_disks-1;
 	sector_t max_sector = mddev->size << 1;
@@ -1781,6 +1854,80 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 
 		return 0;
 	}
+
+	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
+		/* reshaping is quite different to recovery/resync so it is
+		 * handled quite separately ... here.
+		 *
+		 * On each call to sync_request, we gather one chunk worth of
+		 * destination stripes and flag them as expanding.
+		 * Then we find all the source stripes and request reads.
+		 * As the reads complete, handle_stripe will copy the data
+		 * into the destination stripe and release that stripe.
+		 */
+		int i;
+		int dd_idx;
+		for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) {
+			int j;
+			int skipped = 0;
+			pd_idx = stripe_to_pdidx(sector_nr+i, conf, conf->raid_disks);
+			sh = get_active_stripe(conf, sector_nr+i,
+					       conf->raid_disks, pd_idx, 0);
+			set_bit(STRIPE_EXPANDING, &sh->state);
+			/* If any of this stripe is beyond the end of the old
+			 * array, then we need to zero those blocks
+			 */
+			for (j=sh->disks; j--;) {
+				sector_t s;
+				if (j == sh->pd_idx)
+					continue;
+				s = compute_blocknr(sh, j);
+				if (s < (mddev->array_size<<1)) {
+					skipped = 1;
+					continue;
+				}
+				memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
+				set_bit(R5_Expanded, &sh->dev[j].flags);
+				set_bit(R5_UPTODATE, &sh->dev[j].flags);
+			}
+			if (!skipped) {
+				set_bit(STRIPE_EXPAND_READY, &sh->state);
+				set_bit(STRIPE_HANDLE, &sh->state);
+			}
+			release_stripe(sh);
+		}
+		spin_lock_irq(&conf->device_lock);
+		conf->expand_progress = (sector_nr + i)*(conf->raid_disks-1);
+		spin_unlock_irq(&conf->device_lock);
+		/* Ok, those stripes are ready. We can start scheduling
+		 * reads on the source stripes.
+		 * The source stripes are determined by mapping the first and last
+		 * block on the destination stripes.
+		 */
+		raid_disks = conf->previous_raid_disks;
+		data_disks = raid_disks - 1;
+		first_sector =
+			raid5_compute_sector(sector_nr*(conf->raid_disks-1),
+					     raid_disks, data_disks,
+					     &dd_idx, &pd_idx, conf);
+		last_sector =
+			raid5_compute_sector((sector_nr+conf->chunk_size/512)
+					     *(conf->raid_disks-1) -1,
+					     raid_disks, data_disks,
+					     &dd_idx, &pd_idx, conf);
+		if (last_sector >= (mddev->size<<1))
+			last_sector = (mddev->size<<1)-1;
+		while (first_sector <= last_sector) {
+			pd_idx = stripe_to_pdidx(first_sector, conf, conf->previous_raid_disks);
+			sh = get_active_stripe(conf, first_sector,
+					       conf->previous_raid_disks, pd_idx, 0);
+			set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
+			set_bit(STRIPE_HANDLE, &sh->state);
+			release_stripe(sh);
+			first_sector += STRIPE_SECTORS;
+		}
+		return conf->chunk_size>>9;
+	}
 	/* if there is 1 or more failed drives and we are trying
 	 * to resync, then assert that we are finished, because there is
 	 * nothing we can do.
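
The accounting in the reshape branch above is easy to mis-read: each call works through one chunk of per-device sectors (the function returns chunk_size>>9), while expand_progress is recorded in array-data sectors, i.e. device sectors multiplied by the new number of data disks (conf->raid_disks - 1). A small stand-alone illustration follows; the 64KiB chunk size and 4-disk target are example values chosen for the sketch, not anything mandated by the patch:

#include <stdio.h>

/* Mirrors conf->expand_progress = (sector_nr + i) * (conf->raid_disks - 1),
 * where i has reached chunk_size/512 when the destination-stripe loop ends.
 */
int main(void)
{
	const unsigned long long chunk_sectors = (64 * 1024) / 512;	/* 128 */
	const int new_raid_disks = 4;					/* example */
	unsigned long long sector_nr = 0;
	int pass;

	for (pass = 0; pass < 3; pass++) {
		unsigned long long expand_progress =
			(sector_nr + chunk_sectors) * (new_raid_disks - 1);
		printf("pass %d: sector_nr=%llu expand_progress=%llu\n",
		       pass, sector_nr, expand_progress);
		sector_nr += chunk_sectors;	/* sync_request returned chunk_sectors */
	}
	return 0;
}

With these example numbers, expand_progress advances by 384 array sectors per call (128 device sectors times 3 data disks), which is what allows make_request to tell whether a given bio falls in the already-reshaped or the not-yet-reshaped region.
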
@@ -1799,13 +1946,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 		return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
 	}
 
-	x = sector_nr;
-	chunk_offset = sector_div(x, sectors_per_chunk);
-	stripe = x;
-	BUG_ON(x != stripe);
-
-	first_sector = raid5_compute_sector((sector_t)stripe*data_disks*sectors_per_chunk
-		+ chunk_offset, raid_disks, data_disks, &dd_idx, &pd_idx, conf);
+	pd_idx = stripe_to_pdidx(sector_nr, conf, raid_disks);
 	sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 1);
 	if (sh == NULL) {
 		sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 0);