@@ -728,6 +728,27 @@ static void ocfs2_clear_page_regions(struct page *page,
 
 	kunmap_atomic(kaddr, KM_USER0);
 }
 
+/*
+ * Nonsparse file systems fully allocate before we get to the write
+ * code. This prevents ocfs2_write() from tagging the write as an
+ * allocating one, which means ocfs2_map_page_blocks() might try to
+ * read-in the blocks at the tail of our file. Avoid reading them by
+ * testing i_size against each block offset.
+ */
+static int ocfs2_should_read_blk(struct inode *inode, struct page *page,
+				 unsigned int block_start)
+{
+	u64 offset = page_offset(page) + block_start;
+
+	if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
+		return 1;
+
+	if (i_size_read(inode) > offset)
+		return 1;
+
+	return 0;
+}
+
 /*
  * Some of this taken from block_prepare_write(). We already have our
  * mapping by now though, and the entire write will be allocating or
@@ -781,6 +802,7 @@ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
 				set_buffer_uptodate(bh);
 		} else if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
 			   !buffer_new(bh) &&
+			   ocfs2_should_read_blk(inode, page, block_start) &&
 			   (block_start < from || block_end > to)) {
 			ll_rw_block(READ, 1, &bh);
 			*wait_bh++=bh;
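
For illustration only (not part of the patch): a minimal user-space sketch of the decision ocfs2_should_read_blk() makes. The helper name should_read_blk() and its plain parameters are stand-ins I introduce here; "sparse" replaces the ocfs2_sparse_alloc() feature check, "i_size" replaces i_size_read(inode), and "offset" is page_offset(page) + block_start.

#include <stdio.h>

/* Illustrative stand-in for ocfs2_should_read_blk(): decide whether a
 * buffer that starts at "offset" needs to be read from disk. */
static int should_read_blk(int sparse, unsigned long long i_size,
			   unsigned long long offset)
{
	if (sparse)
		return 1;	/* sparse fs: holes are handled elsewhere, keep reading */

	if (i_size > offset)
		return 1;	/* block starts before EOF, so it may hold real data */

	return 0;		/* non-sparse fs, block lies past EOF: skip the read */
}

int main(void)
{
	/* Non-sparse fs, 5000-byte file, 4096-byte blocks: the block at
	 * offset 4096 still overlaps i_size and is read; one at 8192 is not. */
	printf("%d\n", should_read_blk(0, 5000, 4096));	/* prints 1 */
	printf("%d\n", should_read_blk(0, 5000, 8192));	/* prints 0 */
	return 0;
}

This mirrors the guard added to ocfs2_map_page_blocks(): on non-sparse file systems the tail blocks beyond i_size were just allocated and contain nothing worth reading, so skipping ll_rw_block() for them avoids pointless I/O.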