@@ -1894,12 +1894,10 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
 			cbex.ec_block = start;
 			cbex.ec_len = end - start;
 			cbex.ec_start = 0;
-			cbex.ec_type = EXT4_EXT_CACHE_GAP;
 		} else {
 			cbex.ec_block = le32_to_cpu(ex->ee_block);
 			cbex.ec_len = ext4_ext_get_actual_len(ex);
 			cbex.ec_start = ext4_ext_pblock(ex);
-			cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
 		}
 
 		if (unlikely(cbex.ec_len == 0)) {
@@ -1939,13 +1937,12 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
 
 static void
 ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
-			__u32 len, ext4_fsblk_t start, int type)
+			__u32 len, ext4_fsblk_t start)
 {
 	struct ext4_ext_cache *cex;
 	BUG_ON(len == 0);
 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
 	cex = &EXT4_I(inode)->i_cached_extent;
-	cex->ec_type = type;
 	cex->ec_block = block;
 	cex->ec_len = len;
 	cex->ec_start = start;
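Aside (not part of the patch): with ec_type removed, the one-entry extent cache encodes its state purely through the remaining fields. ec_len == 0 means nothing is cached; on a valid entry, ec_start == 0 marks a hole (what ext4_ext_put_gap_in_cache() stores) and a nonzero ec_start marks a mapped extent. The standalone C sketch below models that encoding under those assumptions; the struct mirror and helper names are illustrative, not kernel code.

/*
 * Userspace model of the slimmed-down cache encoding.
 * Compile: cc -o cache_sketch cache_sketch.c
 */
#include <assert.h>
#include <stdio.h>

typedef unsigned int ext4_lblk_t;
typedef unsigned long long ext4_fsblk_t;

struct ext4_ext_cache {			/* mirrors the struct without ec_type */
	ext4_fsblk_t	ec_start;	/* 0 => cached region is a hole */
	ext4_lblk_t	ec_block;
	unsigned int	ec_len;		/* 0 => cache holds nothing */
};

static void put_in_cache(struct ext4_ext_cache *cex, ext4_lblk_t block,
			 unsigned int len, ext4_fsblk_t start)
{
	assert(len != 0);		/* stands in for BUG_ON(len == 0) */
	cex->ec_block = block;
	cex->ec_len = len;
	cex->ec_start = start;
}

int main(void)
{
	struct ext4_ext_cache cex = { 0 };

	put_in_cache(&cex, 100, 8, 0);		/* a gap: start == 0 */
	printf("gap cached: %s\n", cex.ec_start == 0 ? "yes" : "no");

	put_in_cache(&cex, 100, 8, 5000);	/* a mapped extent */
	printf("extent at pblock %llu\n", cex.ec_start);
	return 0;
}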
@@ -1998,15 +1995,18 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
 	}
 
 	ext_debug(" -> %u:%lu\n", lblock, len);
-	ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
+	ext4_ext_put_in_cache(inode, lblock, len, 0);
 }
 
+/*
+ * Return 0 if cache is invalid; 1 if the cache is valid
+ */
 static int
 ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
 			struct ext4_extent *ex)
 {
 	struct ext4_ext_cache *cex;
-	int ret = EXT4_EXT_CACHE_NO;
+	int ret = 0;
 
 	/*
 	 * We borrow i_block_reservation_lock to protect i_cached_extent
@@ -2015,11 +2015,9 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
 	cex = &EXT4_I(inode)->i_cached_extent;
 
 	/* has cache valid data? */
-	if (cex->ec_type == EXT4_EXT_CACHE_NO)
+	if (cex->ec_len == 0)
 		goto errout;
 
-	BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
-			cex->ec_type != EXT4_EXT_CACHE_EXTENT);
 	if (in_range(block, cex->ec_block, cex->ec_len)) {
 		ex->ee_block = cpu_to_le32(cex->ec_block);
 		ext4_ext_store_pblock(ex, cex->ec_start);
@@ -2027,7 +2025,7 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
 		ext_debug("%u cached by %u:%u:%llu\n",
 				block,
 				cex->ec_block, cex->ec_len, cex->ec_start);
-		ret = cex->ec_type;
+		ret = 1;
 	}
 errout:
 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
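Aside (not part of the patch): a caller of the reworked ext4_ext_in_cache() now gets a plain 0/1 answer and, on a hit, inspects the returned extent's start block to tell a hole from a mapped range. Below is a minimal userspace sketch of that lookup convention, with in_range() open-coded and all names hypothetical.

#include <stdio.h>

typedef unsigned int ext4_lblk_t;
typedef unsigned long long ext4_fsblk_t;

struct ext4_ext_cache {
	ext4_fsblk_t	ec_start;	/* 0 => hole */
	ext4_lblk_t	ec_block;
	unsigned int	ec_len;		/* 0 => cache empty */
};

/* Returns 0 on a miss, 1 on a hit, matching the new convention. */
static int in_cache(const struct ext4_ext_cache *cex, ext4_lblk_t block)
{
	if (cex->ec_len == 0)		/* cache never populated */
		return 0;
	return block >= cex->ec_block &&
	       block < cex->ec_block + cex->ec_len;	/* open-coded in_range() */
}

int main(void)
{
	struct ext4_ext_cache cex = { .ec_start = 0, .ec_block = 100, .ec_len = 8 };

	if (in_cache(&cex, 103))
		printf("hit: %s\n", cex.ec_start ? "mapped extent" : "hole");
	return 0;
}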
@@ -3298,7 +3296,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 	struct ext4_extent_header *eh;
 	struct ext4_extent newex, *ex;
 	ext4_fsblk_t newblock;
-	int err = 0, depth, ret, cache_type;
+	int err = 0, depth, ret;
 	unsigned int allocated = 0;
 	struct ext4_allocation_request ar;
 	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
@@ -3307,9 +3305,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 		  map->m_lblk, map->m_len, inode->i_ino);
 
 	/* check in cache */
-	cache_type = ext4_ext_in_cache(inode, map->m_lblk, &newex);
-	if (cache_type) {
-		if (cache_type == EXT4_EXT_CACHE_GAP) {
+	if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
+		if (!newex.ee_start_lo && !newex.ee_start_hi) {
 			if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
 				/*
 				 * block isn't allocated yet and
@@ -3318,7 +3315,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 				goto out2;
 			}
 			/* we should allocate requested block */
-		} else if (cache_type == EXT4_EXT_CACHE_EXTENT) {
+		} else {
 			/* block is already allocated */
 			newblock = map->m_lblk
 				   - le32_to_cpu(newex.ee_block)
@@ -3327,8 +3324,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 			allocated = ext4_ext_get_actual_len(&newex) -
 				(map->m_lblk - le32_to_cpu(newex.ee_block));
 			goto out;
-		} else {
-			BUG();
 		}
 	}
 
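Aside (not part of the patch): the hit classification above replaces the cache_type switch with a field test: a cached hit is a gap exactly when both halves of the extent's physical start block are zero, so the BUG() arm for an unknown type becomes unnecessary. A small standalone sketch of that predicate follows, with an illustrative stand-in for struct ext4_extent and little-endian handling elided.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct ext4_extent_model {	/* illustrative stand-in for struct ext4_extent */
	uint32_t ee_block;	/* first logical block covered by the extent */
	uint16_t ee_len;	/* number of blocks */
	uint16_t ee_start_hi;	/* high 16 bits of the physical block */
	uint32_t ee_start_lo;	/* low 32 bits of the physical block */
};

/* A cached hit is a gap exactly when the physical start is zero. */
static bool cached_hit_is_gap(const struct ext4_extent_model *ex)
{
	return ex->ee_start_lo == 0 && ex->ee_start_hi == 0;
}

int main(void)
{
	struct ext4_extent_model hole = { .ee_block = 200, .ee_len = 4 };

	printf("hit is a gap? %s\n", cached_hit_is_gap(&hole) ? "yes" : "no");
	return 0;
}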
@@ -3379,8 +3374,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 			/* Do not put uninitialized extent in the cache */
 			if (!ext4_ext_is_uninitialized(ex)) {
 				ext4_ext_put_in_cache(inode, ee_block,
-							ee_len, ee_start,
-							EXT4_EXT_CACHE_EXTENT);
+							ee_len, ee_start);
 				goto out;
 			}
 			ret = ext4_ext_handle_uninitialized_extents(handle,
@@ -3512,8 +3506,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 	 * when it is _not_ an uninitialized extent.
 	 */
 	if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
-		ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock,
-						EXT4_EXT_CACHE_EXTENT);
+		ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
 		ext4_update_inode_fsync_trans(handle, inode, 1);
 	} else
 		ext4_update_inode_fsync_trans(handle, inode, 0);
@@ -3789,7 +3782,7 @@ static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
 
 	logical = (__u64)newex->ec_block << blksize_bits;
 
-	if (newex->ec_type == EXT4_EXT_CACHE_GAP) {
+	if (newex->ec_start == 0) {
 		pgoff_t offset;
 		struct page *page;
 		struct buffer_head *bh = NULL;
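Aside (not part of the patch): on the fiemap side the same convention applies: a cache entry whose ec_start is zero is reported as a hole, mirroring what ext4_ext_put_gap_in_cache() stores. Below is a compilable userspace sketch of the test and the byte-offset computation from the hunk, with hypothetical names.

#include <stdio.h>

typedef unsigned int ext4_lblk_t;
typedef unsigned long long ext4_fsblk_t;

struct ext4_ext_cache {
	ext4_fsblk_t	ec_start;	/* 0 => hole */
	ext4_lblk_t	ec_block;
	unsigned int	ec_len;
};

/* A cached region is reported as a hole exactly when ec_start == 0. */
static int cache_entry_is_hole(const struct ext4_ext_cache *cex)
{
	return cex->ec_start == 0;
}

int main(void)
{
	struct ext4_ext_cache gap = { .ec_start = 0, .ec_block = 10, .ec_len = 2 };
	unsigned int blksize_bits = 12;	/* assumes 4 KiB blocks */

	/* same logical-block-to-byte-offset shift as in the hunk above */
	printf("logical %llu, hole=%d\n",
	       (unsigned long long)gap.ec_block << blksize_bits,
	       cache_entry_is_hole(&gap));
	return 0;
}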