@@ -226,12 +226,12 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
 		return 0;
 	}
 	rcu_read_lock();
-	if (mddev->suspended || mddev->barrier) {
+	if (mddev->suspended) {
 		DEFINE_WAIT(__wait);
 		for (;;) {
 			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
-			if (!mddev->suspended && !mddev->barrier)
+			if (!mddev->suspended)
 				break;
 			rcu_read_unlock();
 			schedule();
@@ -282,40 +282,29 @@ EXPORT_SYMBOL_GPL(mddev_resume);
 
 int mddev_congested(mddev_t *mddev, int bits)
 {
-	if (mddev->barrier)
-		return 1;
 	return mddev->suspended;
 }
 EXPORT_SYMBOL(mddev_congested);
 
 /*
- * Generic barrier handling for md
+ * Generic flush handling for md
  */
 
-#define POST_REQUEST_BARRIER ((void*)1)
-
-static void md_end_barrier(struct bio *bio, int err)
+static void md_end_flush(struct bio *bio, int err)
 {
 	mdk_rdev_t *rdev = bio->bi_private;
 	mddev_t *mddev = rdev->mddev;
-	if (err == -EOPNOTSUPP && mddev->barrier != POST_REQUEST_BARRIER)
-		set_bit(BIO_EOPNOTSUPP, &mddev->barrier->bi_flags);
 
 	rdev_dec_pending(rdev, mddev);
 
 	if (atomic_dec_and_test(&mddev->flush_pending)) {
-		if (mddev->barrier == POST_REQUEST_BARRIER) {
-			/* This was a post-request barrier */
-			mddev->barrier = NULL;
-			wake_up(&mddev->sb_wait);
-		} else
-			/* The pre-request barrier has finished */
-			schedule_work(&mddev->barrier_work);
+		/* The pre-request flush has finished */
+		schedule_work(&mddev->flush_work);
 	}
 	bio_put(bio);
 }
 
-static void submit_barriers(mddev_t *mddev)
+static void submit_flushes(mddev_t *mddev)
 {
 	mdk_rdev_t *rdev;
 
@@ -332,60 +321,56 @@ static void submit_barriers(mddev_t *mddev)
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
 			bi = bio_alloc(GFP_KERNEL, 0);
-			bi->bi_end_io = md_end_barrier;
+			bi->bi_end_io = md_end_flush;
 			bi->bi_private = rdev;
 			bi->bi_bdev = rdev->bdev;
 			atomic_inc(&mddev->flush_pending);
-			submit_bio(WRITE_BARRIER, bi);
+			submit_bio(WRITE_FLUSH, bi);
 			rcu_read_lock();
 			rdev_dec_pending(rdev, mddev);
 		}
 	rcu_read_unlock();
 }
 
-static void md_submit_barrier(struct work_struct *ws)
+static void md_submit_flush_data(struct work_struct *ws)
 {
-	mddev_t *mddev = container_of(ws, mddev_t, barrier_work);
-	struct bio *bio = mddev->barrier;
+	mddev_t *mddev = container_of(ws, mddev_t, flush_work);
+	struct bio *bio = mddev->flush_bio;
 
 	atomic_set(&mddev->flush_pending, 1);
 
-	if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
-		bio_endio(bio, -EOPNOTSUPP);
-	else if (bio->bi_size == 0)
+	if (bio->bi_size == 0)
 		/* an empty barrier - all done */
 		bio_endio(bio, 0);
 	else {
-		bio->bi_rw &= ~REQ_HARDBARRIER;
+		bio->bi_rw &= ~REQ_FLUSH;
 		if (mddev->pers->make_request(mddev, bio))
 			generic_make_request(bio);
-		mddev->barrier = POST_REQUEST_BARRIER;
-		submit_barriers(mddev);
 	}
 	if (atomic_dec_and_test(&mddev->flush_pending)) {
-		mddev->barrier = NULL;
+		mddev->flush_bio = NULL;
 		wake_up(&mddev->sb_wait);
 	}
 }
 
-void md_barrier_request(mddev_t *mddev, struct bio *bio)
+void md_flush_request(mddev_t *mddev, struct bio *bio)
 {
 	spin_lock_irq(&mddev->write_lock);
 	wait_event_lock_irq(mddev->sb_wait,
-			    !mddev->barrier,
+			    !mddev->flush_bio,
 			    mddev->write_lock, /*nothing*/);
-	mddev->barrier = bio;
+	mddev->flush_bio = bio;
 	spin_unlock_irq(&mddev->write_lock);
 
 	atomic_set(&mddev->flush_pending, 1);
-	INIT_WORK(&mddev->barrier_work, md_submit_barrier);
+	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
 
-	submit_barriers(mddev);
+	submit_flushes(mddev);
 
 	if (atomic_dec_and_test(&mddev->flush_pending))
-		schedule_work(&mddev->barrier_work);
+		schedule_work(&mddev->flush_work);
 }
-EXPORT_SYMBOL(md_barrier_request);
+EXPORT_SYMBOL(md_flush_request);
 
 /* Support for plugging.
  * This mirrors the plugging support in request_queue, but does not
@@ -696,31 +681,6 @@ static void super_written(struct bio *bio, int error)
 	bio_put(bio);
 }
 
-static void super_written_barrier(struct bio *bio, int error)
-{
-	struct bio *bio2 = bio->bi_private;
-	mdk_rdev_t *rdev = bio2->bi_private;
-	mddev_t *mddev = rdev->mddev;
-
-	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
-	    error == -EOPNOTSUPP) {
-		unsigned long flags;
-		/* barriers don't appear to be supported :-( */
-		set_bit(BarriersNotsupp, &rdev->flags);
-		mddev->barriers_work = 0;
-		spin_lock_irqsave(&mddev->write_lock, flags);
-		bio2->bi_next = mddev->biolist;
-		mddev->biolist = bio2;
-		spin_unlock_irqrestore(&mddev->write_lock, flags);
-		wake_up(&mddev->sb_wait);
-		bio_put(bio);
-	} else {
-		bio_put(bio2);
-		bio->bi_private = rdev;
-		super_written(bio, error);
-	}
-}
-
 void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
 		    sector_t sector, int size, struct page *page)
 {
@@ -729,51 +689,28 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
 	 * and decrement it on completion, waking up sb_wait
 	 * if zero is reached.
 	 * If an error occurred, call md_error
-	 *
-	 * As we might need to resubmit the request if REQ_HARDBARRIER
-	 * causes ENOTSUPP, we allocate a spare bio...
 	 */
 	struct bio *bio = bio_alloc(GFP_NOIO, 1);
-	int rw = REQ_WRITE | REQ_SYNC | REQ_UNPLUG;
 
 	bio->bi_bdev = rdev->bdev;
 	bio->bi_sector = sector;
 	bio_add_page(bio, page, size, 0);
 	bio->bi_private = rdev;
 	bio->bi_end_io = super_written;
-	bio->bi_rw = rw;
 
 	atomic_inc(&mddev->pending_writes);
-	if (!test_bit(BarriersNotsupp, &rdev->flags)) {
-		struct bio *rbio;
-		rw |= REQ_HARDBARRIER;
-		rbio = bio_clone(bio, GFP_NOIO);
-		rbio->bi_private = bio;
-		rbio->bi_end_io = super_written_barrier;
-		submit_bio(rw, rbio);
-	} else
-		submit_bio(rw, bio);
+	submit_bio(REQ_WRITE | REQ_SYNC | REQ_UNPLUG | REQ_FLUSH | REQ_FUA,
+		   bio);
 }
 
 void md_super_wait(mddev_t *mddev)
 {
-	/* wait for all superblock writes that were scheduled to complete.
-	 * if any had to be retried (due to BARRIER problems), retry them
-	 */
+	/* wait for all superblock writes that were scheduled to complete */
 	DEFINE_WAIT(wq);
 	for(;;) {
 		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
 		if (atomic_read(&mddev->pending_writes)==0)
 			break;
-		while (mddev->biolist) {
-			struct bio *bio;
-			spin_lock_irq(&mddev->write_lock);
-			bio = mddev->biolist;
-			mddev->biolist = bio->bi_next ;
-			bio->bi_next = NULL;
-			spin_unlock_irq(&mddev->write_lock);
-			submit_bio(bio->bi_rw, bio);
-		}
 		schedule();
 	}
 	finish_wait(&mddev->sb_wait, &wq);
@@ -1070,7 +1007,6 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
 	clear_bit(Faulty, &rdev->flags);
 	clear_bit(In_sync, &rdev->flags);
 	clear_bit(WriteMostly, &rdev->flags);
-	clear_bit(BarriersNotsupp, &rdev->flags);
 
 	if (mddev->raid_disks == 0) {
 		mddev->major_version = 0;
@@ -1485,7 +1421,6 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
 	clear_bit(Faulty, &rdev->flags);
 	clear_bit(In_sync, &rdev->flags);
 	clear_bit(WriteMostly, &rdev->flags);
-	clear_bit(BarriersNotsupp, &rdev->flags);
 
 	if (mddev->raid_disks == 0) {
 		mddev->major_version = 1;
@@ -4506,7 +4441,6 @@ int md_run(mddev_t *mddev)
 	/* may be over-ridden by personality */
 	mddev->resync_max_sectors = mddev->dev_sectors;
 
-	mddev->barriers_work = 1;
 	mddev->ok_start_degraded = start_dirty_degraded;
 
 	if (start_readonly && mddev->ro == 0)
@@ -4685,7 +4619,6 @@ static void md_clean(mddev_t *mddev)
 	mddev->recovery = 0;
 	mddev->in_sync = 0;
 	mddev->degraded = 0;
-	mddev->barriers_work = 0;
 	mddev->safemode = 0;
 	mddev->bitmap_info.offset = 0;
 	mddev->bitmap_info.default_offset = 0;