@@ -821,7 +821,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 				 * contents - it is a noop if I/O is still in
 				 * flight on potentially older contents.
 				 */
-				ll_rw_block(SWRITE, 1, &bh);
+				ll_rw_block(SWRITE_SYNC, 1, &bh);
 				brelse(bh);
 				spin_lock(lock);
 			}
@@ -2940,16 +2940,19 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 	for (i = 0; i < nr; i++) {
 		struct buffer_head *bh = bhs[i];
 
-		if (rw == SWRITE)
+		if (rw == SWRITE || rw == SWRITE_SYNC)
 			lock_buffer(bh);
 		else if (test_set_buffer_locked(bh))
 			continue;
 
-		if (rw == WRITE || rw == SWRITE) {
+		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
 			if (test_clear_buffer_dirty(bh)) {
 				bh->b_end_io = end_buffer_write_sync;
 				get_bh(bh);
-				submit_bh(WRITE, bh);
+				if (rw == SWRITE_SYNC)
+					submit_bh(WRITE_SYNC, bh);
+				else
+					submit_bh(WRITE, bh);
 				continue;
 			}
 		} else {
@@ -2978,7 +2981,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
 	if (test_clear_buffer_dirty(bh)) {
 		get_bh(bh);
 		bh->b_end_io = end_buffer_write_sync;
-		ret = submit_bh(WRITE, bh);
+		ret = submit_bh(WRITE_SYNC, bh);
 		wait_on_buffer(bh);
 		if (buffer_eopnotsupp(bh)) {
 			clear_buffer_eopnotsupp(bh);
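
For reference, the new SWRITE_SYNC mode is aimed at callers that lock a dirty buffer, submit it, and then block until the write completes; submitting it as WRITE_SYNC lets the block layer and the I/O scheduler treat it as a synchronous write rather than background writeback. Below is a minimal sketch of such a caller, mirroring the pattern in the fsync_buffers_list() hunk above. The helper name write_one_buffer_sync() is hypothetical and assumes the buffer-head API as it stood when this patch was written.

#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/buffer_head.h>

/*
 * Hypothetical helper, not part of the patch: write one dirty buffer
 * synchronously via the new SWRITE_SYNC mode and wait for completion,
 * the same write-then-wait pattern fsync_buffers_list() follows above.
 */
static int write_one_buffer_sync(struct buffer_head *bh)
{
	int err = 0;

	get_bh(bh);				/* pin the buffer across the I/O */
	ll_rw_block(SWRITE_SYNC, 1, &bh);	/* waits for the buffer lock, then submits as WRITE_SYNC */
	wait_on_buffer(bh);			/* sleep until end_buffer_write_sync() completes it */
	if (!buffer_uptodate(bh))
		err = -EIO;
	brelse(bh);
	return err;
}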