@@ -353,6 +353,7 @@ static noinline int compress_file_range(struct inode *inode,
 	int i;
 	int will_compress;
 	int compress_type = root->fs_info->compress_type;
+	int redirty = 0;
 
 	/* if this is a small write inside eof, kick off a defrag */
 	if ((end - start + 1) < 16 * 1024 &&
@@ -415,6 +416,17 @@ again:
 		if (BTRFS_I(inode)->force_compress)
 			compress_type = BTRFS_I(inode)->force_compress;
 
+		/*
+		 * we need to call clear_page_dirty_for_io on each
+		 * page in the range. Otherwise applications with the file
+		 * mmap'd can wander in and change the page contents while
+		 * we are compressing them.
+		 *
+		 * If the compression fails for any reason, we set the pages
+		 * dirty again later on.
+		 */
+		extent_range_clear_dirty_for_io(inode, start, end);
+		redirty = 1;
 		ret = btrfs_compress_pages(compress_type,
 					   inode->i_mapping, start,
 					   total_compressed, pages,
@@ -554,6 +566,8 @@ cleanup_and_bail_uncompressed:
 		__set_page_dirty_nobuffers(locked_page);
 		/* unlocked later on in the async handlers */
 	}
+	if (redirty)
+		extent_range_redirty_for_io(inode, start, end);
 	add_async_extent(async_cow, start, end - start + 1,
 			 0, NULL, 0, BTRFS_COMPRESS_NONE);
 	*num_added += 1;