@@ -825,11 +825,29 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		 */
 		bp = bio_split(bio,
 			       chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
+
+		/* Each of these 'make_request' calls will call 'wait_barrier'.
+		 * If the first succeeds but the second blocks due to the resync
+		 * thread raising the barrier, we will deadlock because the
+		 * IO to the underlying device will be queued in generic_make_request
+		 * and will never complete, so will never reduce nr_pending.
+		 * So increment nr_waiting here so no new raise_barriers will
+		 * succeed, and so the second wait_barrier cannot block.
+		 */
+		spin_lock_irq(&conf->resync_lock);
+		conf->nr_waiting++;
+		spin_unlock_irq(&conf->resync_lock);
+
 		if (make_request(mddev, &bp->bio1))
 			generic_make_request(&bp->bio1);
 		if (make_request(mddev, &bp->bio2))
 			generic_make_request(&bp->bio2);
 
+		spin_lock_irq(&conf->resync_lock);
+		conf->nr_waiting--;
+		wake_up(&conf->wait_barrier);
+		spin_unlock_irq(&conf->resync_lock);
+
 		bio_pair_release(bp);
 		return 0;
 	bad_map:
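
For context, the deadlock the comment describes arises because bio_split() yields two halves that are submitted back to back, and each recursive make_request() call goes through wait_barrier(). If the resync thread calls raise_barrier() between the two submissions, the second wait_barrier() sleeps; meanwhile the first half is still parked on the caller's queue inside generic_make_request(), so it never completes, nr_pending never drops, and raise_barrier() never finishes draining either, leaving both sides waiting on each other forever.

Below is a minimal user-space model (pthreads) of the barrier scheme the hunk relies on. The names mirror the conf members the patch touches (resync_lock, wait_barrier, nr_waiting, nr_pending, barrier), but the bodies are an illustrative sketch of the usual md barrier pattern, not the kernel's actual code:

#include <pthread.h>

static pthread_mutex_t resync_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_barrier_q = PTHREAD_COND_INITIALIZER;
static int barrier;     /* resync barrier is raised */
static int nr_waiting;  /* normal IO blocked in wait_barrier() */
static int nr_pending;  /* normal IO in flight */

/* Normal-IO entry point: sleep while the resync barrier is up. */
static void wait_barrier(void)
{
	pthread_mutex_lock(&resync_lock);
	if (barrier) {
		nr_waiting++;
		while (barrier)
			pthread_cond_wait(&wait_barrier_q, &resync_lock);
		nr_waiting--;
	}
	nr_pending++;
	pthread_mutex_unlock(&resync_lock);
}

/* Normal-IO completion: drop nr_pending and wake the resync thread. */
static void allow_barrier(void)
{
	pthread_mutex_lock(&resync_lock);
	nr_pending--;
	pthread_cond_broadcast(&wait_barrier_q);
	pthread_mutex_unlock(&resync_lock);
}

/* Resync entry point: refuse to raise the barrier while any normal IO
 * is counted in nr_waiting, then wait for in-flight IO to drain. */
static void raise_barrier(void)
{
	pthread_mutex_lock(&resync_lock);
	while (nr_waiting)
		pthread_cond_wait(&wait_barrier_q, &resync_lock);
	barrier = 1;
	while (nr_pending)
		pthread_cond_wait(&wait_barrier_q, &resync_lock);
	pthread_mutex_unlock(&resync_lock);
}

/* Resync completion: lower the barrier and release waiters. */
static void lower_barrier(void)
{
	pthread_mutex_lock(&resync_lock);
	barrier = 0;
	pthread_cond_broadcast(&wait_barrier_q);
	pthread_mutex_unlock(&resync_lock);
}

Because raise_barrier() refuses to set barrier while nr_waiting is non-zero, holding nr_waiting elevated across both submissions guarantees that no new barrier can be raised between them, which is exactly the window the hunk closes; the matching decrement plus wake_up afterwards lets any raise_barrier() that queued up in the meantime proceed.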