@@ -2611,7 +2611,7 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
 
 #define EXT4_EXT_ZERO_LEN 7
 /*
- * This function is called by ext4_ext_get_blocks() if someone tries to write
+ * This function is called by ext4_ext_map_blocks() if someone tries to write
  * to an uninitialized extent. It may result in splitting the uninitialized
  * extent into multiple extents (upto three - one initialized and two
  * uninitialized).
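
[Annotation: for context, the ext4_map_blocks structure that replaces the
iblock/max_blocks/bh_result triple throughout this patch looks roughly like
the sketch below. Field names are taken from their usage in the hunks that
follow; the exact layout in fs/ext4/ext4.h may differ.]

	struct ext4_map_blocks {
		ext4_fsblk_t m_pblk;	/* first physical block of the mapping */
		ext4_lblk_t m_lblk;	/* first logical block to map */
		unsigned int m_len;	/* in: blocks requested; out: blocks mapped */
		unsigned int m_flags;	/* out: EXT4_MAP_* state bits */
	};
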
@@ -2621,10 +2621,9 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
  * c> Splits in three extents: Somone is writing in middle of the extent
  */
 static int ext4_ext_convert_to_initialized(handle_t *handle,
-						struct inode *inode,
-						struct ext4_ext_path *path,
-						ext4_lblk_t iblock,
-						unsigned int max_blocks)
+					   struct inode *inode,
+					   struct ext4_map_blocks *map,
+					   struct ext4_ext_path *path)
 {
 	struct ext4_extent *ex, newex, orig_ex;
 	struct ext4_extent *ex1 = NULL;
@@ -2640,20 +2639,20 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 
 	ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
 		"block %llu, max_blocks %u\n", inode->i_ino,
-		(unsigned long long)iblock, max_blocks);
+		(unsigned long long)map->m_lblk, map->m_len);
 
 	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
 		inode->i_sb->s_blocksize_bits;
-	if (eof_block < iblock + max_blocks)
-		eof_block = iblock + max_blocks;
+	if (eof_block < map->m_lblk + map->m_len)
+		eof_block = map->m_lblk + map->m_len;
 
 	depth = ext_depth(inode);
 	eh = path[depth].p_hdr;
 	ex = path[depth].p_ext;
 	ee_block = le32_to_cpu(ex->ee_block);
 	ee_len = ext4_ext_get_actual_len(ex);
-	allocated = ee_len - (iblock - ee_block);
-	newblock = iblock - ee_block + ext_pblock(ex);
+	allocated = ee_len - (map->m_lblk - ee_block);
+	newblock = map->m_lblk - ee_block + ext_pblock(ex);
 
 	ex2 = ex;
 	orig_ex.ee_block = ex->ee_block;
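
[Annotation: the eof_block computation above is an ordinary round-up of
i_size to filesystem blocks, then widened so the write being converted is
covered. A worked example, assuming a 4KiB block size
(s_blocksize_bits == 12):]

	/* i_size = 10000 bytes, block size = 4096:
	 * eof_block = (10000 + 4096 - 1) >> 12 = 14095 >> 12 = 3,
	 * i.e. blocks 0..2 hold data within i_size; eof_block is then
	 * raised to at least map->m_lblk + map->m_len so the extent
	 * being written is never treated as beyond EOF.
	 */
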
@@ -2683,10 +2682,10 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 		return allocated;
 	}
 
-	/* ex1: ee_block to iblock - 1 : uninitialized */
-	if (iblock > ee_block) {
+	/* ex1: ee_block to map->m_lblk - 1 : uninitialized */
+	if (map->m_lblk > ee_block) {
 		ex1 = ex;
-		ex1->ee_len = cpu_to_le16(iblock - ee_block);
+		ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
 		ext4_ext_mark_uninitialized(ex1);
 		ex2 = &newex;
 	}
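
[Annotation: to make the ex1/ex2/ex3 bookkeeping concrete, here is a
hypothetical split; the numbers are illustrative only. With ee_block = 100,
ee_len = 50 and a write of map->m_lblk = 110, map->m_len = 10, the code
computes allocated = 50 - (110 - 100) = 40 and the extent ends up as:]

	/* ex1: 100..109, 10 blocks, uninitialized (map->m_lblk - ee_block)
	 * ex2: 110..119, 10 blocks, initialized   (the written range)
	 * ex3: 120..149, 30 blocks, uninitialized (allocated - map->m_len)
	 */
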
@@ -2695,15 +2694,15 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 	 * we insert ex3, if ex1 is NULL. This is to avoid temporary
 	 * overlap of blocks.
 	 */
-	if (!ex1 && allocated > max_blocks)
-		ex2->ee_len = cpu_to_le16(max_blocks);
+	if (!ex1 && allocated > map->m_len)
+		ex2->ee_len = cpu_to_le16(map->m_len);
 	/* ex3: to ee_block + ee_len : uninitialised */
-	if (allocated > max_blocks) {
+	if (allocated > map->m_len) {
 		unsigned int newdepth;
 		/* If extent has less than EXT4_EXT_ZERO_LEN zerout directly */
 		if (allocated <= EXT4_EXT_ZERO_LEN && may_zeroout) {
 			/*
-			 * iblock == ee_block is handled by the zerouout
+			 * map->m_lblk == ee_block is handled by the zeroout
 			 * at the beginning.
 			 * Mark first half uninitialized.
 			 * Mark second half initialized and zero out the
@@ -2716,7 +2715,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 			ext4_ext_dirty(handle, inode, path + depth);
 
 			ex3 = &newex;
-			ex3->ee_block = cpu_to_le32(iblock);
+			ex3->ee_block = cpu_to_le32(map->m_lblk);
 			ext4_ext_store_pblock(ex3, newblock);
 			ex3->ee_len = cpu_to_le16(allocated);
 			err = ext4_ext_insert_extent(handle, inode, path,
@@ -2729,7 +2728,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 				ex->ee_len = orig_ex.ee_len;
 				ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
 				ext4_ext_dirty(handle, inode, path + depth);
-				/* blocks available from iblock */
+				/* blocks available from map->m_lblk */
 				return allocated;
 
 			} else if (err)
@@ -2751,8 +2750,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 			 */
 			depth = ext_depth(inode);
 			ext4_ext_drop_refs(path);
-			path = ext4_ext_find_extent(inode,
-							iblock, path);
+			path = ext4_ext_find_extent(inode, map->m_lblk,
+						    path);
 			if (IS_ERR(path)) {
 				err = PTR_ERR(path);
 				return err;
@@ -2772,9 +2771,9 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 			return allocated;
 		}
 		ex3 = &newex;
-		ex3->ee_block = cpu_to_le32(iblock + max_blocks);
-		ext4_ext_store_pblock(ex3, newblock + max_blocks);
-		ex3->ee_len = cpu_to_le16(allocated - max_blocks);
+		ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len);
+		ext4_ext_store_pblock(ex3, newblock + map->m_len);
+		ex3->ee_len = cpu_to_le16(allocated - map->m_len);
 		ext4_ext_mark_uninitialized(ex3);
 		err = ext4_ext_insert_extent(handle, inode, path, ex3, 0);
 		if (err == -ENOSPC && may_zeroout) {
@@ -2787,7 +2786,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
 			ext4_ext_dirty(handle, inode, path + depth);
 			/* zeroed the full extent */
-			/* blocks available from iblock */
+			/* blocks available from map->m_lblk */
 			return allocated;
 
 		} else if (err)
@@ -2807,7 +2806,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 
 			depth = newdepth;
 			ext4_ext_drop_refs(path);
-			path = ext4_ext_find_extent(inode, iblock, path);
+			path = ext4_ext_find_extent(inode, map->m_lblk, path);
 			if (IS_ERR(path)) {
 				err = PTR_ERR(path);
 				goto out;
@@ -2821,14 +2820,14 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 		if (err)
 			goto out;
 
-		allocated = max_blocks;
+		allocated = map->m_len;
 
 		/* If extent has less than EXT4_EXT_ZERO_LEN and we are trying
 		 * to insert a extent in the middle zerout directly
 		 * otherwise give the extent a chance to merge to left
 		 */
 		if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN &&
-			iblock != ee_block && may_zeroout) {
+			map->m_lblk != ee_block && may_zeroout) {
 			err = ext4_ext_zeroout(inode, &orig_ex);
 			if (err)
 				goto fix_extent_len;
@@ -2838,7 +2837,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
 			ext4_ext_dirty(handle, inode, path + depth);
 			/* zero out the first half */
-			/* blocks available from iblock */
+			/* blocks available from map->m_lblk */
 			return allocated;
 		}
 	}
@@ -2849,12 +2848,12 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 	 */
 	if (ex1 && ex1 != ex) {
 		ex1 = ex;
-		ex1->ee_len = cpu_to_le16(iblock - ee_block);
+		ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
 		ext4_ext_mark_uninitialized(ex1);
 		ex2 = &newex;
 	}
-	/* ex2: iblock to iblock + maxblocks-1 : initialised */
-	ex2->ee_block = cpu_to_le32(iblock);
+	/* ex2: map->m_lblk to map->m_lblk + map->m_len - 1 : initialised */
+	ex2->ee_block = cpu_to_le32(map->m_lblk);
 	ext4_ext_store_pblock(ex2, newblock);
 	ex2->ee_len = cpu_to_le16(allocated);
 	if (ex2 != ex)
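
[Annotation: the EXT4_EXT_ZERO_LEN == 7 cutoff used in this function trades
a little I/O for tree simplicity: a short uninitialized extent is cheaper to
zero out wholesale than to split three ways, which may force leaf allocation
and tree growth. The same zeroout path doubles as the fallback when
ext4_ext_insert_extent() fails with -ENOSPC and may_zeroout permits it. A
rough cost comparison, assuming 4KiB blocks:]

	/* e.g. a 6-block uninitialized extent:
	 * zeroing writes 6 * 4096 = 24576 bytes once, versus a three-way
	 * split that may allocate a new leaf block and rebalance the tree.
	 */
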
@@ -2924,7 +2923,7 @@ fix_extent_len:
 }
 
 /*
- * This function is called by ext4_ext_get_blocks() from
+ * This function is called by ext4_ext_map_blocks() from
  * ext4_get_blocks_dio_write() when DIO to write
  * to an uninitialized extent.
  *
@@ -2947,9 +2946,8 @@ fix_extent_len:
  */
 static int ext4_split_unwritten_extents(handle_t *handle,
 					struct inode *inode,
+					struct ext4_map_blocks *map,
 					struct ext4_ext_path *path,
-					ext4_lblk_t iblock,
-					unsigned int max_blocks,
 					int flags)
 {
 	struct ext4_extent *ex, newex, orig_ex;
@@ -2965,20 +2963,20 @@ static int ext4_split_unwritten_extents(handle_t *handle,
 
 	ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
 		"block %llu, max_blocks %u\n", inode->i_ino,
-		(unsigned long long)iblock, max_blocks);
+		(unsigned long long)map->m_lblk, map->m_len);
 
 	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
 		inode->i_sb->s_blocksize_bits;
-	if (eof_block < iblock + max_blocks)
-		eof_block = iblock + max_blocks;
+	if (eof_block < map->m_lblk + map->m_len)
+		eof_block = map->m_lblk + map->m_len;
 
 	depth = ext_depth(inode);
 	eh = path[depth].p_hdr;
 	ex = path[depth].p_ext;
 	ee_block = le32_to_cpu(ex->ee_block);
 	ee_len = ext4_ext_get_actual_len(ex);
-	allocated = ee_len - (iblock - ee_block);
-	newblock = iblock - ee_block + ext_pblock(ex);
+	allocated = ee_len - (map->m_lblk - ee_block);
+	newblock = map->m_lblk - ee_block + ext_pblock(ex);
 
 	ex2 = ex;
 	orig_ex.ee_block = ex->ee_block;
@@ -2996,16 +2994,16 @@ static int ext4_split_unwritten_extents(handle_t *handle,
 	 * block where the write begins, and the write completely
 	 * covers the extent, then we don't need to split it.
 	 */
-	if ((iblock == ee_block) && (allocated <= max_blocks))
+	if ((map->m_lblk == ee_block) && (allocated <= map->m_len))
 		return allocated;
 
 	err = ext4_ext_get_access(handle, inode, path + depth);
 	if (err)
 		goto out;
-	/* ex1: ee_block to iblock - 1 : uninitialized */
-	if (iblock > ee_block) {
+	/* ex1: ee_block to map->m_lblk - 1 : uninitialized */
+	if (map->m_lblk > ee_block) {
 		ex1 = ex;
-		ex1->ee_len = cpu_to_le16(iblock - ee_block);
+		ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
 		ext4_ext_mark_uninitialized(ex1);
 		ex2 = &newex;
 	}
@@ -3014,15 +3012,15 @@ static int ext4_split_unwritten_extents(handle_t *handle,
 	 * we insert ex3, if ex1 is NULL. This is to avoid temporary
 	 * overlap of blocks.
 	 */
-	if (!ex1 && allocated > max_blocks)
-		ex2->ee_len = cpu_to_le16(max_blocks);
+	if (!ex1 && allocated > map->m_len)
+		ex2->ee_len = cpu_to_le16(map->m_len);
 	/* ex3: to ee_block + ee_len : uninitialised */
-	if (allocated > max_blocks) {
+	if (allocated > map->m_len) {
 		unsigned int newdepth;
 		ex3 = &newex;
-		ex3->ee_block = cpu_to_le32(iblock + max_blocks);
-		ext4_ext_store_pblock(ex3, newblock + max_blocks);
-		ex3->ee_len = cpu_to_le16(allocated - max_blocks);
+		ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len);
+		ext4_ext_store_pblock(ex3, newblock + map->m_len);
+		ex3->ee_len = cpu_to_le16(allocated - map->m_len);
 		ext4_ext_mark_uninitialized(ex3);
 		err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
 		if (err == -ENOSPC && may_zeroout) {
@@ -3035,7 +3033,7 @@ static int ext4_split_unwritten_extents(handle_t *handle,
 			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
 			ext4_ext_dirty(handle, inode, path + depth);
 			/* zeroed the full extent */
-			/* blocks available from iblock */
+			/* blocks available from map->m_lblk */
 			return allocated;
 
 		} else if (err)
@@ -3055,7 +3053,7 @@ static int ext4_split_unwritten_extents(handle_t *handle,
 
 			depth = newdepth;
 			ext4_ext_drop_refs(path);
-			path = ext4_ext_find_extent(inode, iblock, path);
+			path = ext4_ext_find_extent(inode, map->m_lblk, path);
 			if (IS_ERR(path)) {
 				err = PTR_ERR(path);
 				goto out;
@@ -3069,7 +3067,7 @@ static int ext4_split_unwritten_extents(handle_t *handle,
 		if (err)
 			goto out;
 
-		allocated = max_blocks;
+		allocated = map->m_len;
 	}
 	/*
 	 * If there was a change of depth as part of the
@@ -3078,15 +3076,15 @@ static int ext4_split_unwritten_extents(handle_t *handle,
 	 */
 	if (ex1 && ex1 != ex) {
 		ex1 = ex;
-		ex1->ee_len = cpu_to_le16(iblock - ee_block);
+		ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
 		ext4_ext_mark_uninitialized(ex1);
 		ex2 = &newex;
 	}
 	/*
-	 * ex2: iblock to iblock + maxblocks-1 : to be direct IO written,
-	 * uninitialised still.
+	 * ex2: map->m_lblk to map->m_lblk + map->m_len-1 : to be written
+	 * using direct I/O, uninitialised still.
 	 */
-	ex2->ee_block = cpu_to_le32(iblock);
+	ex2->ee_block = cpu_to_le32(map->m_lblk);
 	ext4_ext_store_pblock(ex2, newblock);
 	ex2->ee_len = cpu_to_le16(allocated);
 	ext4_ext_mark_uninitialized(ex2);
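
[Annotation: for the direct-I/O case the same arithmetic yields at most a
two-way split, since ex2 stays uninitialized rather than being converted. An
illustrative case (numbers hypothetical): ee_block = 200, ee_len = 32,
map->m_lblk = 208, map->m_len = 24, so allocated = 32 - 8 = 24 and no ex3 is
needed:]

	/* ex1: 200..207, uninitialized, left as-is
	 * ex2: 208..231, stays uninitialized until the I/O completes and
	 *      the end_io path converts it to written
	 */
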
@@ -3188,10 +3186,9 @@ static void unmap_underlying_metadata_blocks(struct block_device *bdev,
 
 static int
 ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
-			ext4_lblk_t iblock, unsigned int max_blocks,
+			struct ext4_map_blocks *map,
 			struct ext4_ext_path *path, int flags,
-			unsigned int allocated, struct buffer_head *bh_result,
-			ext4_fsblk_t newblock)
+			unsigned int allocated, ext4_fsblk_t newblock)
 {
 	int ret = 0;
 	int err = 0;
@@ -3199,15 +3196,14 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
 
 	ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical"
 		  "block %llu, max_blocks %u, flags %d, allocated %u",
-		  inode->i_ino, (unsigned long long)iblock, max_blocks,
+		  inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
 		  flags, allocated);
 	ext4_ext_show_leaf(inode, path);
 
 	/* get_block() before submit the IO, split the extent */
 	if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
-		ret = ext4_split_unwritten_extents(handle,
-						inode, path, iblock,
-						max_blocks, flags);
+		ret = ext4_split_unwritten_extents(handle, inode, map,
+						   path, flags);
 		/*
 		 * Flag the inode(non aio case) or end_io struct (aio case)
 		 * that this IO needs to convertion to written when IO is
@@ -3218,7 +3214,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
 		else
 			ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
 		if (ext4_should_dioread_nolock(inode))
-			set_buffer_uninit(bh_result);
+			map->m_flags |= EXT4_MAP_UNINIT;
 		goto out;
 	}
 	/* IO end_io complete, convert the filled extent to written */
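
[Annotation: the m_flags bits written here replace the buffer_head state
bits one-for-one. A sketch of the definitions this patch relies on; the
values are assumed to mirror the BH_* bit numbers so legacy wrappers can OR
them straight into b_state:]

	#define EXT4_MAP_NEW		(1 << BH_New)
	#define EXT4_MAP_MAPPED		(1 << BH_Mapped)
	#define EXT4_MAP_UNWRITTEN	(1 << BH_Unwritten)
	#define EXT4_MAP_UNINIT		(1 << BH_Uninit)
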
@@ -3246,14 +3242,12 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
 		 * the buffer head will be unmapped so that
 		 * a read from the block returns 0s.
 		 */
-		set_buffer_unwritten(bh_result);
+		map->m_flags |= EXT4_MAP_UNWRITTEN;
 		goto out1;
 	}
 
 	/* buffered write, writepage time, convert*/
-	ret = ext4_ext_convert_to_initialized(handle, inode,
-						path, iblock,
-						max_blocks);
+	ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
 	if (ret >= 0)
 		ext4_update_inode_fsync_trans(handle, inode, 1);
 out:
@@ -3262,7 +3256,7 @@ out:
 		goto out2;
 	} else
 		allocated = ret;
-	set_buffer_new(bh_result);
+	map->m_flags |= EXT4_MAP_NEW;
 	/*
 	 * if we allocated more blocks than requested
 	 * we need to make sure we unmap the extra block
@@ -3270,11 +3264,11 @@ out:
 	 * unmapped later when we find the buffer_head marked
 	 * new.
 	 */
-	if (allocated > max_blocks) {
+	if (allocated > map->m_len) {
 		unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
-					newblock + max_blocks,
-					allocated - max_blocks);
-		allocated = max_blocks;
+					newblock + map->m_len,
+					allocated - map->m_len);
+		allocated = map->m_len;
 	}
 
 	/*
@@ -3288,13 +3282,13 @@ out:
 		ext4_da_update_reserve_space(inode, allocated, 0);
 
 map_out:
-	set_buffer_mapped(bh_result);
+	map->m_flags |= EXT4_MAP_MAPPED;
 out1:
-	if (allocated > max_blocks)
-		allocated = max_blocks;
+	if (allocated > map->m_len)
+		allocated = map->m_len;
 	ext4_ext_show_leaf(inode, path);
-	bh_result->b_bdev = inode->i_sb->s_bdev;
-	bh_result->b_blocknr = newblock;
+	map->m_pblk = newblock;
+	map->m_len = allocated;
 out2:
 	if (path) {
 		ext4_ext_drop_refs(path);
@@ -3320,10 +3314,8 @@ out2:
  *
  * return < 0, error case.
  */
-int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
-			ext4_lblk_t iblock,
-			unsigned int max_blocks, struct buffer_head *bh_result,
-			int flags)
+int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
+			struct ext4_map_blocks *map, int flags)
 {
 	struct ext4_ext_path *path = NULL;
 	struct ext4_extent_header *eh;
@@ -3334,12 +3326,11 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 	struct ext4_allocation_request ar;
 	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
 
-	__clear_bit(BH_New, &bh_result->b_state);
 	ext_debug("blocks %u/%u requested for inode %lu\n",
-			iblock, max_blocks, inode->i_ino);
+		  map->m_lblk, map->m_len, inode->i_ino);
 
 	/* check in cache */
-	cache_type = ext4_ext_in_cache(inode, iblock, &newex);
+	cache_type = ext4_ext_in_cache(inode, map->m_lblk, &newex);
 	if (cache_type) {
 		if (cache_type == EXT4_EXT_CACHE_GAP) {
 			if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
@@ -3352,12 +3343,12 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 			/* we should allocate requested block */
 		} else if (cache_type == EXT4_EXT_CACHE_EXTENT) {
 			/* block is already allocated */
-			newblock = iblock
+			newblock = map->m_lblk
 				   - le32_to_cpu(newex.ee_block)
 				   + ext_pblock(&newex);
 			/* number of remaining blocks in the extent */
 			allocated = ext4_ext_get_actual_len(&newex) -
-					(iblock - le32_to_cpu(newex.ee_block));
+					(map->m_lblk - le32_to_cpu(newex.ee_block));
 			goto out;
 		} else {
 			BUG();
@@ -3365,7 +3356,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 	}
 
 	/* find extent for this block */
-	path = ext4_ext_find_extent(inode, iblock, NULL);
+	path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
 	if (IS_ERR(path)) {
 		err = PTR_ERR(path);
 		path = NULL;
@@ -3382,7 +3373,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
 		EXT4_ERROR_INODE(inode, "bad extent address "
 				 "iblock: %d, depth: %d pblock %lld",
-				 iblock, depth, path[depth].p_block);
+				 map->m_lblk, depth, path[depth].p_block);
 		err = -EIO;
 		goto out2;
 	}
@@ -3400,12 +3391,12 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 		 */
 		ee_len = ext4_ext_get_actual_len(ex);
 		/* if found extent covers block, simply return it */
-		if (in_range(iblock, ee_block, ee_len)) {
-			newblock = iblock - ee_block + ee_start;
+		if (in_range(map->m_lblk, ee_block, ee_len)) {
+			newblock = map->m_lblk - ee_block + ee_start;
 			/* number of remaining blocks in the extent */
-			allocated = ee_len - (iblock - ee_block);
-			ext_debug("%u fit into %u:%d -> %llu\n", iblock,
-					ee_block, ee_len, newblock);
+			allocated = ee_len - (map->m_lblk - ee_block);
+			ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
+				  ee_block, ee_len, newblock);
 
 			/* Do not put uninitialized extent in the cache */
 			if (!ext4_ext_is_uninitialized(ex)) {
@@ -3415,8 +3406,8 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 				goto out;
 			}
 			ret = ext4_ext_handle_uninitialized_extents(handle,
-					inode, iblock, max_blocks, path,
-					flags, allocated, bh_result, newblock);
+					inode, map, path, flags, allocated,
+					newblock);
 			return ret;
 		}
 	}
@@ -3430,7 +3421,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 		 * put just found gap into cache to speed up
 		 * subsequent requests
 		 */
-		ext4_ext_put_gap_in_cache(inode, path, iblock);
+		ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
 		goto out2;
 	}
 	/*
@@ -3438,11 +3429,11 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 	 */
 
 	/* find neighbour allocated blocks */
-	ar.lleft = iblock;
+	ar.lleft = map->m_lblk;
 	err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
 	if (err)
 		goto out2;
-	ar.lright = iblock;
+	ar.lright = map->m_lblk;
 	err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright);
 	if (err)
 		goto out2;
@@ -3453,26 +3444,26 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 	 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
 	 * EXT_UNINIT_MAX_LEN.
 	 */
-	if (max_blocks > EXT_INIT_MAX_LEN &&
+	if (map->m_len > EXT_INIT_MAX_LEN &&
 	    !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
-		max_blocks = EXT_INIT_MAX_LEN;
-	else if (max_blocks > EXT_UNINIT_MAX_LEN &&
+		map->m_len = EXT_INIT_MAX_LEN;
+	else if (map->m_len > EXT_UNINIT_MAX_LEN &&
 		 (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
-		max_blocks = EXT_UNINIT_MAX_LEN;
+		map->m_len = EXT_UNINIT_MAX_LEN;
 
-	/* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
-	newex.ee_block = cpu_to_le32(iblock);
-	newex.ee_len = cpu_to_le16(max_blocks);
+	/* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
+	newex.ee_block = cpu_to_le32(map->m_lblk);
+	newex.ee_len = cpu_to_le16(map->m_len);
 	err = ext4_ext_check_overlap(inode, &newex, path);
 	if (err)
 		allocated = ext4_ext_get_actual_len(&newex);
 	else
-		allocated = max_blocks;
+		allocated = map->m_len;
 
 	/* allocate new block */
 	ar.inode = inode;
-	ar.goal = ext4_ext_find_goal(inode, path, iblock);
-	ar.logical = iblock;
+	ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
+	ar.logical = map->m_lblk;
 	ar.len = allocated;
 	if (S_ISREG(inode->i_mode))
 		ar.flags = EXT4_MB_HINT_DATA;
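
[Annotation: the clamp above reflects the on-disk format: ee_len is 16 bits
and its high bit marks an extent uninitialized, so one extent can cover at
most EXT_INIT_MAX_LEN (32768) blocks when initialized and EXT_UNINIT_MAX_LEN
(32767) when uninitialized. An oversized request is simply truncated here
and the caller loops for the remainder:]

	/* e.g. map->m_len = 100000 with EXT4_GET_BLOCKS_UNINIT_EXT clear:
	 * m_len is clamped to 32768 for this call; the remaining
	 * 67232 blocks are mapped by subsequent calls.
	 */
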
@@ -3506,7 +3497,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 							EXT4_STATE_DIO_UNWRITTEN);
 		}
 		if (ext4_should_dioread_nolock(inode))
-			set_buffer_uninit(bh_result);
+			map->m_flags |= EXT4_MAP_UNINIT;
 	}
 
 	if (unlikely(EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL)) {
@@ -3518,7 +3509,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 			goto out2;
 		}
 		last_ex = EXT_LAST_EXTENT(eh);
-		if (iblock + ar.len > le32_to_cpu(last_ex->ee_block)
+		if (map->m_lblk + ar.len > le32_to_cpu(last_ex->ee_block)
 		    + ext4_ext_get_actual_len(last_ex))
 			EXT4_I(inode)->i_flags &= ~EXT4_EOFBLOCKS_FL;
 	}
@@ -3536,9 +3527,9 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 	/* previous routine could use block we allocated */
 	newblock = ext_pblock(&newex);
 	allocated = ext4_ext_get_actual_len(&newex);
-	if (allocated > max_blocks)
-		allocated = max_blocks;
-	set_buffer_new(bh_result);
+	if (allocated > map->m_len)
+		allocated = map->m_len;
+	map->m_flags |= EXT4_MAP_NEW;
 
 	/*
 	 * Update reserved blocks/metadata blocks after successful
@@ -3552,18 +3543,18 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 	 * when it is _not_ an uninitialized extent.
 	 */
 	if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
-		ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
+		ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock,
 						EXT4_EXT_CACHE_EXTENT);
 		ext4_update_inode_fsync_trans(handle, inode, 1);
 	} else
 		ext4_update_inode_fsync_trans(handle, inode, 0);
 out:
-	if (allocated > max_blocks)
-		allocated = max_blocks;
+	if (allocated > map->m_len)
+		allocated = map->m_len;
 	ext4_ext_show_leaf(inode, path);
-	set_buffer_mapped(bh_result);
-	bh_result->b_bdev = inode->i_sb->s_bdev;
-	bh_result->b_blocknr = newblock;
+	map->m_flags |= EXT4_MAP_MAPPED;
+	map->m_pblk = newblock;
+	map->m_len = allocated;
 out2:
 	if (path) {
 		ext4_ext_drop_refs(path);
@@ -3729,7 +3720,7 @@ retry:
 		if (ret <= 0) {
 #ifdef EXT4FS_DEBUG
 			WARN_ON(ret <= 0);
-			printk(KERN_ERR "%s: ext4_ext_get_blocks "
+			printk(KERN_ERR "%s: ext4_ext_map_blocks "
 			       "returned error inode#%lu, block=%u, "
 			       "max_blocks=%u", __func__,
 			       inode->i_ino, block, max_blocks);
@@ -3806,7 +3797,7 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
 					    EXT4_GET_BLOCKS_IO_CONVERT_EXT);
 		if (ret <= 0) {
 			WARN_ON(ret <= 0);
-			printk(KERN_ERR "%s: ext4_ext_get_blocks "
+			printk(KERN_ERR "%s: ext4_ext_map_blocks "
 			       "returned error inode#%lu, block=%u, "
 			       "max_blocks=%u", __func__,
 			       inode->i_ino, block, max_blocks);
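
[Annotation: callers that still traffic in buffer_heads can sit on top of
the new entry point. Below is a minimal sketch of such a wrapper; the helper
name is hypothetical (the real wrapper in this series lives in
ext4_get_blocks() in fs/ext4/inode.c), and EXT4_MAP_FLAGS is assumed to be
the OR of the EXT4_MAP_* bits shown earlier.]

	static int ext4_bh_get_blocks(handle_t *handle, struct inode *inode,
				      ext4_lblk_t iblock,
				      unsigned int max_blocks,
				      struct buffer_head *bh, int flags)
	{
		struct ext4_map_blocks map;
		int ret;

		map.m_lblk = iblock;
		map.m_len = max_blocks;
		map.m_flags = 0;
		ret = ext4_ext_map_blocks(handle, inode, &map, flags);
		if (ret > 0) {
			/* EXT4_MAP_* mirror the BH_* bits by construction */
			bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) |
				      map.m_flags;
			bh->b_blocknr = map.m_pblk;
			bh->b_bdev = inode->i_sb->s_bdev;
			bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		}
		return ret;
	}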
|