|
@@ -63,6 +63,11 @@ struct nilfs_segment_buffer *nilfs_segbuf_new(struct super_block *sb)
 	INIT_LIST_HEAD(&segbuf->sb_list);
 	INIT_LIST_HEAD(&segbuf->sb_segsum_buffers);
 	INIT_LIST_HEAD(&segbuf->sb_payload_buffers);
+
+	init_completion(&segbuf->sb_bio_event);
+	atomic_set(&segbuf->sb_err, 0);
+	segbuf->sb_nbio = 0;
+
 	return segbuf;
 }
 
@@ -132,8 +137,6 @@ int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned flags,
 	segbuf->sb_sum.sumbytes = sizeof(struct nilfs_segment_summary);
 	segbuf->sb_sum.nfinfo = segbuf->sb_sum.nfileblk = 0;
 	segbuf->sb_sum.ctime = ctime;
-
-	segbuf->sb_io_error = 0;
 	return 0;
 }
 
@@ -247,7 +250,7 @@ void nilfs_release_buffers(struct list_head *list)
 static void nilfs_end_bio_write(struct bio *bio, int err)
 {
 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	struct nilfs_write_info *wi = bio->bi_private;
+	struct nilfs_segment_buffer *segbuf = bio->bi_private;
 
 	if (err == -EOPNOTSUPP) {
 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
@@ -256,21 +259,22 @@ static void nilfs_end_bio_write(struct bio *bio, int err)
 	}
 
 	if (!uptodate)
-		atomic_inc(&wi->err);
+		atomic_inc(&segbuf->sb_err);
 
 	bio_put(bio);
-	complete(&wi->bio_event);
+	complete(&segbuf->sb_bio_event);
 }
 
-static int nilfs_submit_seg_bio(struct nilfs_write_info *wi, int mode)
+static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
+				   struct nilfs_write_info *wi, int mode)
 {
 	struct bio *bio = wi->bio;
 	int err;
 
-	if (wi->nbio > 0 && bdi_write_congested(wi->bdi)) {
-		wait_for_completion(&wi->bio_event);
-		wi->nbio--;
-		if (unlikely(atomic_read(&wi->err))) {
+	if (segbuf->sb_nbio > 0 && bdi_write_congested(wi->bdi)) {
+		wait_for_completion(&segbuf->sb_bio_event);
+		segbuf->sb_nbio--;
+		if (unlikely(atomic_read(&segbuf->sb_err))) {
 			bio_put(bio);
 			err = -EIO;
 			goto failed;
@@ -278,7 +282,7 @@ static int nilfs_submit_seg_bio(struct nilfs_write_info *wi, int mode)
 	}
 
 	bio->bi_end_io = nilfs_end_bio_write;
-	bio->bi_private = wi;
+	bio->bi_private = segbuf;
 	bio_get(bio);
 	submit_bio(mode, bio);
 	if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
@@ -286,7 +290,7 @@ static int nilfs_submit_seg_bio(struct nilfs_write_info *wi, int mode)
 		err = -EOPNOTSUPP;
 		goto failed;
 	}
-	wi->nbio++;
+	segbuf->sb_nbio++;
 	bio_put(bio);
 
 	wi->bio = NULL;
@@ -336,15 +340,12 @@ void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *segbuf,
 	wi->max_pages = bio_get_nr_vecs(wi->sb->s_bdev);
 	wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
 	wi->start = wi->end = 0;
-	wi->nbio = 0;
 	wi->blocknr = segbuf->sb_pseg_start;
-
-	atomic_set(&wi->err, 0);
-	init_completion(&wi->bio_event);
 }
 
-static int nilfs_submit_bh(struct nilfs_write_info *wi, struct buffer_head *bh,
-			   int mode)
+static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf,
+				  struct nilfs_write_info *wi,
+				  struct buffer_head *bh, int mode)
 {
 	int len, err;
 
@@ -363,7 +364,7 @@ static int nilfs_submit_bh(struct nilfs_write_info *wi, struct buffer_head *bh,
 		return 0;
 	}
 	/* bio is FULL */
-	err = nilfs_submit_seg_bio(wi, mode);
+	err = nilfs_segbuf_submit_bio(segbuf, wi, mode);
 	/* never submit current bh */
 	if (likely(!err))
 		goto repeat;
@@ -377,13 +378,13 @@ int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
 	int res = 0, rw = WRITE;
 
 	list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) {
-		res = nilfs_submit_bh(wi, bh, rw);
+		res = nilfs_segbuf_submit_bh(segbuf, wi, bh, rw);
 		if (unlikely(res))
 			goto failed_bio;
 	}
 
 	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
-		res = nilfs_submit_bh(wi, bh, rw);
+		res = nilfs_segbuf_submit_bh(segbuf, wi, bh, rw);
 		if (unlikely(res))
 			goto failed_bio;
 	}
@@ -394,7 +395,7 @@ int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
 		 * submission.
 		 */
 		rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
-		res = nilfs_submit_seg_bio(wi, rw);
+		res = nilfs_segbuf_submit_bio(segbuf, wi, rw);
 	}
 
  failed_bio:
@@ -403,29 +404,27 @@ int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
|
|
|
|
|
|
/**
|
|
|
* nilfs_segbuf_wait - wait for completion of requested BIOs
|
|
|
- * @wi: nilfs_write_info
|
|
|
+ * @segbuf: segment buffer
|
|
|
*
|
|
|
* Return Value: On Success, 0 is returned. On Error, one of the following
|
|
|
* negative error code is returned.
|
|
|
*
|
|
|
* %-EIO - I/O error
|
|
|
*/
|
|
|
-int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf,
|
|
|
- struct nilfs_write_info *wi)
|
|
|
+int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf)
|
|
|
{
|
|
|
int err = 0;
|
|
|
|
|
|
- if (!wi->nbio)
|
|
|
+ if (!segbuf->sb_nbio)
|
|
|
return 0;
|
|
|
|
|
|
do {
|
|
|
- wait_for_completion(&wi->bio_event);
|
|
|
- } while (--wi->nbio > 0);
|
|
|
+ wait_for_completion(&segbuf->sb_bio_event);
|
|
|
+ } while (--segbuf->sb_nbio > 0);
|
|
|
|
|
|
- if (unlikely(atomic_read(&wi->err) > 0)) {
|
|
|
+ if (unlikely(atomic_read(&segbuf->sb_err) > 0)) {
|
|
|
printk(KERN_ERR "NILFS: IO error writing segment\n");
|
|
|
err = -EIO;
|
|
|
- segbuf->sb_io_error = 1;
|
|
|
}
|
|
|
return err;
|
|
|
}
|