@@ -3461,115 +3461,34 @@ out:
 /**
  * ext4_find_delalloc_range: find delayed allocated block in the given range.
  *
- * Goes through the buffer heads in the range [lblk_start, lblk_end] and returns
- * whether there are any buffers marked for delayed allocation. It returns '1'
- * on the first delalloc'ed buffer head found. If no buffer head in the given
- * range is marked for delalloc, it returns 0.
- * lblk_start should always be <= lblk_end.
- * search_hint_reverse is to indicate that searching in reverse from lblk_end to
- * lblk_start might be more efficient (i.e., we will likely hit the delalloc'ed
- * block sooner). This is useful when blocks are truncated sequentially from
- * lblk_start towards lblk_end.
+ * Return 1 if there is a delalloc block in the range, otherwise 0.
  */
 static int ext4_find_delalloc_range(struct inode *inode,
 				    ext4_lblk_t lblk_start,
-				    ext4_lblk_t lblk_end,
-				    int search_hint_reverse)
+				    ext4_lblk_t lblk_end)
 {
-	struct address_space *mapping = inode->i_mapping;
-	struct buffer_head *head, *bh = NULL;
-	struct page *page;
-	ext4_lblk_t i, pg_lblk;
-	pgoff_t index;
-
-	if (!test_opt(inode->i_sb, DELALLOC))
-		return 0;
-
-	/* reverse search wont work if fs block size is less than page size */
-	if (inode->i_blkbits < PAGE_CACHE_SHIFT)
-		search_hint_reverse = 0;
+	struct extent_status es;
 
-	if (search_hint_reverse)
-		i = lblk_end;
+	es.start = lblk_start;
+	ext4_es_find_extent(inode, &es);
+	if (es.len == 0)
+		return 0; /* there is no delay extent in this tree */
+	else if (es.start <= lblk_start && lblk_start < es.start + es.len)
+		return 1;
+	else if (lblk_start <= es.start && es.start <= lblk_end)
+		return 1;
 	else
-		i = lblk_start;
-
-	index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
-
-	while ((i >= lblk_start) && (i <= lblk_end)) {
-		page = find_get_page(mapping, index);
-		if (!page)
-			goto nextpage;
-
-		if (!page_has_buffers(page))
-			goto nextpage;
-
-		head = page_buffers(page);
-		if (!head)
-			goto nextpage;
-
-		bh = head;
-		pg_lblk = index << (PAGE_CACHE_SHIFT -
-						inode->i_blkbits);
-		do {
-			if (unlikely(pg_lblk < lblk_start)) {
-				/*
-				 * This is possible when fs block size is less
-				 * than page size and our cluster starts/ends in
-				 * middle of the page. So we need to skip the
-				 * initial few blocks till we reach the 'lblk'
-				 */
-				pg_lblk++;
-				continue;
-			}
-
-			/* Check if the buffer is delayed allocated and that it
-			 * is not yet mapped. (when da-buffers are mapped during
-			 * their writeout, their da_mapped bit is set.)
-			 */
-			if (buffer_delay(bh) && !buffer_da_mapped(bh)) {
-				page_cache_release(page);
-				trace_ext4_find_delalloc_range(inode,
-						lblk_start, lblk_end,
-						search_hint_reverse,
-						1, i);
-				return 1;
-			}
-			if (search_hint_reverse)
-				i--;
-			else
-				i++;
-		} while ((i >= lblk_start) && (i <= lblk_end) &&
-				((bh = bh->b_this_page) != head));
-nextpage:
-		if (page)
-			page_cache_release(page);
-		/*
-		 * Move to next page. 'i' will be the first lblk in the next
-		 * page.
-		 */
-		if (search_hint_reverse)
-			index--;
-		else
-			index++;
-		i = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
-	}
-
-	trace_ext4_find_delalloc_range(inode, lblk_start, lblk_end,
-					search_hint_reverse, 0, 0);
-	return 0;
+		return 0;
 }
 
-int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk,
-			       int search_hint_reverse)
+int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 	ext4_lblk_t lblk_start, lblk_end;
 	lblk_start = lblk & (~(sbi->s_cluster_ratio - 1));
 	lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
 
-	return ext4_find_delalloc_range(inode, lblk_start, lblk_end,
-					search_hint_reverse);
+	return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
 }
 
 /**
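
[Editor's note: the rewritten lookup above replaces the page-by-page
buffer-head walk with a single query of the inode's extent status tree.
ext4_es_find_extent() fills 'es' with the first cached delayed extent
whose range ends at or after es.start, with es.len == 0 meaning no such
extent exists, so the chain of comparisons is simply an interval-overlap
test between [lblk_start, lblk_end] and [es.start, es.start + es.len - 1].
A minimal sketch of that test follows; 'struct delalloc_extent' and
'range_has_delalloc' are hypothetical stand-ins, not kernel code.

	/* Hypothetical stand-in for struct extent_status (sketch only). */
	struct delalloc_extent {
		unsigned int start;	/* first logical block of the extent */
		unsigned int len;	/* length in blocks; 0 == none found */
	};

	/*
	 * Mirrors the new ext4_find_delalloc_range() logic; 'es' is assumed
	 * to be the first extent ending at or after 'start'.
	 */
	static int range_has_delalloc(const struct delalloc_extent *es,
				      unsigned int start, unsigned int end)
	{
		if (es->len == 0)
			return 0;	/* nothing cached at or after 'start' */
		if (es->start <= start && start < es->start + es->len)
			return 1;	/* extent straddles the range start */
		if (start <= es->start && es->start <= end)
			return 1;	/* extent begins inside the range */
		return 0;		/* extent lies entirely past 'end' */
	}

For an extent {start = 100, len = 8}, a query for [90, 99] returns 0,
while [90, 100] returns 1 via the "extent begins inside the range" branch.]
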
@@ -3630,7 +3549,7 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
 		lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
 		lblk_to = lblk_from + c_offset - 1;
 
-		if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
+		if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
 			allocated_clusters--;
 	}
 
@@ -3640,7 +3559,7 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
 		lblk_from = lblk_start + num_blks;
 		lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
 
-		if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
+		if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
			allocated_clusters--;
 	}
 
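
[Editor's note: get_reserved_cluster_alloc() above and
ext4_find_delalloc_cluster() in the first hunk both derive cluster
boundaries with the same mask arithmetic: because sbi->s_cluster_ratio
is a power of two, lblk & ~(s_cluster_ratio - 1) rounds down to the
first block of the cluster containing lblk. A short sketch, with
'ratio' as a hypothetical stand-in for sbi->s_cluster_ratio:

	/*
	 * Compute the first and last block of the cluster holding 'lblk',
	 * assuming 'ratio' (blocks per cluster) is a power of two.
	 */
	static void cluster_bounds(unsigned int lblk, unsigned int ratio,
				   unsigned int *first, unsigned int *last)
	{
		*first = lblk & ~(ratio - 1);	/* round down to cluster start */
		*last = *first + ratio - 1;	/* last block of same cluster */
	}

For example, ratio = 16 and lblk = 37 give first = 32 and last = 47.]
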
@@ -3927,7 +3846,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 	if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
 		if (!newex.ee_start_lo && !newex.ee_start_hi) {
 			if ((sbi->s_cluster_ratio > 1) &&
-			    ext4_find_delalloc_cluster(inode, map->m_lblk, 0))
+			    ext4_find_delalloc_cluster(inode, map->m_lblk))
 				map->m_flags |= EXT4_MAP_FROM_CLUSTER;
 
 		if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
@@ -4015,7 +3934,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 	}
 
 	if ((sbi->s_cluster_ratio > 1) &&
-	    ext4_find_delalloc_cluster(inode, map->m_lblk, 0))
+	    ext4_find_delalloc_cluster(inode, map->m_lblk))
 		map->m_flags |= EXT4_MAP_FROM_CLUSTER;
 
 	/*