@@ -3693,6 +3693,41 @@ unlock:
 	return err;
 }
 
+int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
+			     loff_t lstart, loff_t length)
+{
+	struct super_block *sb = inode->i_sb;
+	struct address_space *mapping = inode->i_mapping;
+	unsigned partial = lstart & (sb->s_blocksize - 1);
+	ext4_fsblk_t start, end;
+	loff_t byte_end = (lstart + length - 1);
+	int err = 0;
+
+	start = lstart >> sb->s_blocksize_bits;
+	end = byte_end >> sb->s_blocksize_bits;
+
+	/* Handle partial zero within the single block */
+	if (start == end) {
+		err = ext4_block_zero_page_range(handle, mapping,
+						 lstart, length);
+		return err;
+	}
+	/* Handle partial zero out on the start of the range */
+	if (partial) {
+		err = ext4_block_zero_page_range(handle, mapping,
+						 lstart, sb->s_blocksize);
+		if (err)
+			return err;
+	}
+	/* Handle partial zero out on the end of the range */
+	partial = byte_end & (sb->s_blocksize - 1);
+	if (partial != sb->s_blocksize - 1)
+		err = ext4_block_zero_page_range(handle, mapping,
+						 byte_end - partial,
+						 partial + 1);
+	return err;
+}
+
 int ext4_can_truncate(struct inode *inode)
 {
 	if (S_ISREG(inode->i_mode))
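The new helper covers three cases: a hole that starts and ends inside one block, an unaligned head, and an unaligned tail; blocks fully covered by the hole are left to the page-cache truncation and block-removal paths in ext4_punch_hole(). For the head it passes a full sb->s_blocksize, which relies on ext4_block_zero_page_range() capping the length at the end of the containing block (that capping is not shown in this excerpt). A minimal userspace sketch of the same range arithmetic, assuming a 4096-byte block size; the names and sample values below are illustrative only, not part of the patch:

#include <stdio.h>

#define BLOCKSIZE 4096ULL	/* stand-in for sb->s_blocksize */

static void partial_ranges(unsigned long long lstart, unsigned long long length)
{
	unsigned long long byte_end = lstart + length - 1;
	unsigned long long start = lstart / BLOCKSIZE;
	unsigned long long end = byte_end / BLOCKSIZE;
	unsigned long long partial = lstart % BLOCKSIZE;

	if (start == end) {
		/* Hole contained in a single block: one zeroing call */
		printf("zero [%llu, %llu]\n", lstart, byte_end);
		return;
	}
	if (partial)
		/* Unaligned head: zero up to the end of the first block */
		printf("zero head [%llu, %llu]\n", lstart,
		       (start + 1) * BLOCKSIZE - 1);
	partial = byte_end % BLOCKSIZE;
	if (partial != BLOCKSIZE - 1)
		/* Unaligned tail: zero from the tail block start to byte_end */
		printf("zero tail [%llu, %llu]\n",
		       byte_end - partial, byte_end);
}

int main(void)
{
	partial_ranges(1000, 2000);	/* prints: zero [1000, 2999] */
	partial_ranges(1000, 8192);	/* head [1000, 4095], tail [8192, 9191] */
	return 0;
}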
@@ -3721,8 +3756,7 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
 	struct super_block *sb = inode->i_sb;
 	ext4_lblk_t first_block, stop_block;
 	struct address_space *mapping = inode->i_mapping;
-	loff_t first_page, last_page, page_len;
-	loff_t first_page_offset, last_page_offset;
+	loff_t first_block_offset, last_block_offset;
 	handle_t *handle;
 	unsigned int credits;
 	int ret = 0;
@@ -3773,17 +3807,13 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
 			   offset;
 	}
 
-	first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-	last_page = (offset + length) >> PAGE_CACHE_SHIFT;
+	first_block_offset = round_up(offset, sb->s_blocksize);
+	last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
 
-	first_page_offset = first_page << PAGE_CACHE_SHIFT;
-	last_page_offset = last_page << PAGE_CACHE_SHIFT;
-
-	/* Now release the pages */
-	if (last_page_offset > first_page_offset) {
-		truncate_pagecache_range(inode, first_page_offset,
-					 last_page_offset - 1);
-	}
+	/* Now release the pages and zero the block-aligned part of pages */
+	if (last_block_offset > first_block_offset)
+		truncate_pagecache_range(inode, first_block_offset,
+					 last_block_offset);
 
 	/* Wait all existing dio workers, newcomers will block on i_mutex */
 	ext4_inode_block_unlocked_dio(inode);
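round_up()/round_down() give the first and last byte of the fully block-aligned middle of the hole, so only whole blocks are dropped from the page cache; the partial edges are zeroed by ext4_zero_partial_blocks() instead. A quick userspace check of the arithmetic, using the kernel's power-of-two forms of the two macros; the sample offsets are made up:

#include <stdio.h>

/* Power-of-two forms matching the kernel's round_up()/round_down() */
#define round_down(x, y) ((x) & ~((__typeof__(x))(y) - 1))
#define round_up(x, y)   ((((x) - 1) | ((__typeof__(x))(y) - 1)) + 1)

int main(void)
{
	long long blocksize = 4096, offset = 5000, length = 10000;

	/* first byte of the first whole block inside the hole */
	long long first = round_up(offset, blocksize);		/* 8192 */
	/* last byte of the last whole block inside the hole */
	long long last = round_down(offset + length, blocksize) - 1; /* 12287 */

	printf("punch [%lld, %lld] -> drop page cache [%lld, %lld]\n",
	       offset, offset + length - 1, first, last);
	return 0;
}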
@@ -3803,66 +3833,10 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
 		goto out_dio;
 	}
 
-	/*
-	 * Now we need to zero out the non-page-aligned data in the
-	 * pages at the start and tail of the hole, and unmap the
-	 * buffer heads for the block aligned regions of the page that
-	 * were completely zeroed.
-	 */
-	if (first_page > last_page) {
-		/*
-		 * If the file space being truncated is contained
-		 * within a page just zero out and unmap the middle of
-		 * that page
-		 */
-		ret = ext4_discard_partial_page_buffers(handle,
-					mapping, offset, length, 0);
-
-		if (ret)
-			goto out_stop;
-	} else {
-		/*
-		 * zero out and unmap the partial page that contains
-		 * the start of the hole
-		 */
-		page_len = first_page_offset - offset;
-		if (page_len > 0) {
-			ret = ext4_discard_partial_page_buffers(handle, mapping,
-						offset, page_len, 0);
-			if (ret)
-				goto out_stop;
-		}
-
-		/*
-		 * zero out and unmap the partial page that contains
-		 * the end of the hole
-		 */
-		page_len = offset + length - last_page_offset;
-		if (page_len > 0) {
-			ret = ext4_discard_partial_page_buffers(handle, mapping,
-						last_page_offset, page_len, 0);
-			if (ret)
-				goto out_stop;
-		}
-	}
-
-	/*
-	 * If i_size is contained in the last page, we need to
-	 * unmap and zero the partial page after i_size
-	 */
-	if (inode->i_size >> PAGE_CACHE_SHIFT == last_page &&
-	    inode->i_size % PAGE_CACHE_SIZE != 0) {
-		page_len = PAGE_CACHE_SIZE -
-			(inode->i_size & (PAGE_CACHE_SIZE - 1));
-
-		if (page_len > 0) {
-			ret = ext4_discard_partial_page_buffers(handle,
-					mapping, inode->i_size, page_len, 0);
-
-			if (ret)
-				goto out_stop;
-		}
-	}
+	ret = ext4_zero_partial_blocks(handle, inode, offset,
+				       length);
+	if (ret)
+		goto out_stop;
 
 	first_block = (offset + sb->s_blocksize - 1) >>
 		EXT4_BLOCK_SIZE_BITS(sb);
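The trailing context shows the hand-off to block removal: first_block is offset rounded up to the next block boundary, expressed in block units, and (offset + blocksize - 1) >> bits is the shift form of a round-up division. A toy check, again assuming 4096-byte blocks; the values are illustrative:

#include <stdio.h>

int main(void)
{
	unsigned long long blocksize = 4096;	/* assumed block size */
	unsigned int bits = 12;			/* log2(blocksize) */
	unsigned long long offset = 5000;

	/* Same as ceil(offset / blocksize) for offset > 0 */
	unsigned long long first_block = (offset + blocksize - 1) >> bits;

	printf("first_block = %llu\n", first_block);	/* prints 2 */
	return 0;
}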