@@ -120,6 +120,12 @@ void ext4_evict_inode(struct inode *inode)
 	int err;
 
 	trace_ext4_evict_inode(inode);
+
+	mutex_lock(&inode->i_mutex);
+	ext4_flush_completed_IO(inode);
+	mutex_unlock(&inode->i_mutex);
+	ext4_ioend_wait(inode);
+
 	if (inode->i_nlink) {
 		/*
 		 * When journalling data dirty buffers are tracked only in the
@@ -983,6 +989,8 @@ static int ext4_journalled_write_end(struct file *file,
 	from = pos & (PAGE_CACHE_SIZE - 1);
 	to = from + len;
 
+	BUG_ON(!ext4_handle_valid(handle));
+
 	if (copied < len) {
 		if (!PageUptodate(page))
 			copied = 0;
@@ -1283,7 +1291,12 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
 		else if (test_opt(inode->i_sb, MBLK_IO_SUBMIT))
 			err = ext4_bio_write_page(&io_submit, page,
 						  len, mpd->wbc);
-		else
+		else if (buffer_uninit(page_bufs)) {
+			ext4_set_bh_endio(page_bufs, inode);
+			err = block_write_full_page_endio(page,
+						noalloc_get_block_write,
+						mpd->wbc, ext4_end_io_buffer_write);
+		} else
 			err = block_write_full_page(page,
 						noalloc_get_block_write, mpd->wbc);
 
@@ -1699,6 +1712,8 @@ static int __ext4_journalled_writepage(struct page *page,
 		goto out;
 	}
 
+	BUG_ON(!ext4_handle_valid(handle));
+
 	ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
 				do_journal_get_write_access);
 
@@ -2668,8 +2683,15 @@ static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
 		goto out;
 	}
 
-	io_end->flag = EXT4_IO_END_UNWRITTEN;
+	/*
+	 * Checking for EXT4_IO_END_UNWRITTEN here may be over-defensive
+	 * today, but being careful is safe against future changes.
+	 */
 	inode = io_end->inode;
+	if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
+		io_end->flag |= EXT4_IO_END_UNWRITTEN;
+		atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten);
+	}
 
 	/* Add the io_end to per-inode completed io list*/
 	spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
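
Note: below is a minimal user-space sketch of the idempotent guard the last hunk
introduces. The names (inode_model, io_end_model, end_io_buffer_write) are
illustrative stand-ins, not the kernel's; it only models how checking
EXT4_IO_END_UNWRITTEN before setting it keeps a repeated end-io completion from
double-incrementing the pending-conversion counter.

/* Hypothetical model, not kernel code: the guarded flag-set from the
 * last hunk, with C11 atomics standing in for the kernel's atomic_t. */
#include <stdatomic.h>
#include <stdio.h>

#define IO_END_UNWRITTEN 0x1

struct io_end_model {
	unsigned int flag;
};

struct inode_model {
	atomic_int aiodio_unwritten;	/* pending unwritten-extent conversions */
};

/* Models ext4_end_io_buffer_write(): flag and counter change only once. */
static void end_io_buffer_write(struct inode_model *inode,
				struct io_end_model *io)
{
	if (!(io->flag & IO_END_UNWRITTEN)) {
		io->flag |= IO_END_UNWRITTEN;
		atomic_fetch_add(&inode->aiodio_unwritten, 1);
	}
}

int main(void)
{
	struct inode_model inode;
	struct io_end_model io = { .flag = 0 };

	atomic_init(&inode.aiodio_unwritten, 0);

	/* A second completion on the same io_end must not double-count. */
	end_io_buffer_write(&inode, &io);
	end_io_buffer_write(&inode, &io);

	printf("pending conversions: %d\n",
	       atomic_load(&inode.aiodio_unwritten));	/* prints 1 */
	return 0;
}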