@@ -1433,12 +1433,13 @@ int extent_clear_unlock_delalloc(struct inode *inode,
  */
 u64 count_range_bits(struct extent_io_tree *tree,
 		     u64 *start, u64 search_end, u64 max_bytes,
-		     unsigned long bits)
+		     unsigned long bits, int contig)
 {
 	struct rb_node *node;
 	struct extent_state *state;
 	u64 cur_start = *start;
 	u64 total_bytes = 0;
+	u64 last = 0;
 	int found = 0;
 
 	if (search_end <= cur_start) {
@@ -1463,7 +1464,9 @@ u64 count_range_bits(struct extent_io_tree *tree,
 		state = rb_entry(node, struct extent_state, rb_node);
 		if (state->start > search_end)
 			break;
-		if (state->end >= cur_start && (state->state & bits)) {
+		if (contig && found && state->start > last + 1)
+			break;
+		if (state->end >= cur_start && (state->state & bits) == bits) {
 			total_bytes += min(search_end, state->end) + 1 -
 				       max(cur_start, state->start);
 			if (total_bytes >= max_bytes)
@@ -1472,6 +1475,9 @@ u64 count_range_bits(struct extent_io_tree *tree,
 				*start = state->start;
 				found = 1;
 			}
+			last = state->end;
+		} else if (contig && found) {
+			break;
 		}
 		node = rb_next(node);
 		if (!node)
@@ -2912,6 +2918,46 @@ out:
 	return sector;
 }
 
+/*
+ * helper function for fiemap, which doesn't want to see any holes.
+ * This maps until we find something past 'last'
+ */
+static struct extent_map *get_extent_skip_holes(struct inode *inode,
+						u64 offset,
+						u64 last,
+						get_extent_t *get_extent)
+{
+	u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
+	struct extent_map *em;
+	u64 len;
+
+	if (offset >= last)
+		return NULL;
+
+	while(1) {
+		len = last - offset;
+		if (len == 0)
+			break;
+		len = (len + sectorsize - 1) & ~(sectorsize - 1);
+		em = get_extent(inode, NULL, 0, offset, len, 0);
+		if (!em || IS_ERR(em))
+			return em;
+
+		/* if this isn't a hole return it */
+		if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
+		    em->block_start != EXTENT_MAP_HOLE) {
+			return em;
+		}
+
+		/* this is a hole, advance to the next extent */
+		offset = extent_map_end(em);
+		free_extent_map(em);
+		if (offset >= last)
+			break;
+	}
+	return NULL;
+}
+
 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		__u64 start, __u64 len, get_extent_t *get_extent)
 {
@@ -2921,16 +2967,19 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	u32 flags = 0;
 	u32 found_type;
 	u64 last;
+	u64 last_for_get_extent = 0;
 	u64 disko = 0;
+	u64 isize = i_size_read(inode);
 	struct btrfs_key found_key;
 	struct extent_map *em = NULL;
 	struct extent_state *cached_state = NULL;
 	struct btrfs_path *path;
 	struct btrfs_file_extent_item *item;
 	int end = 0;
-	u64 em_start = 0, em_len = 0;
+	u64 em_start = 0;
+	u64 em_len = 0;
+	u64 em_end = 0;
 	unsigned long emflags;
-	int hole = 0;
 
 	if (len == 0)
 		return -EINVAL;
@@ -2940,6 +2989,10 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		return -ENOMEM;
 	path->leave_spinning = 1;
 
+	/*
+	 * lookup the last file extent. We're not using i_size here
+	 * because there might be preallocation past i_size
+	 */
 	ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
 				       path, inode->i_ino, -1, 0);
 	if (ret < 0) {
@@ -2953,18 +3006,38 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
 	found_type = btrfs_key_type(&found_key);
 
-	/* No extents, just return */
+	/* No extents, but there might be delalloc bits */
 	if (found_key.objectid != inode->i_ino ||
 	    found_type != BTRFS_EXTENT_DATA_KEY) {
-		btrfs_free_path(path);
-		return 0;
+		/* have to trust i_size as the end */
+		last = (u64)-1;
+		last_for_get_extent = isize;
+	} else {
+		/*
+		 * remember the start of the last extent. There are a
+		 * bunch of different factors that go into the length of the
+		 * extent, so its much less complex to remember where it started
+		 */
+		last = found_key.offset;
+		last_for_get_extent = last + 1;
 	}
-	last = found_key.offset;
 	btrfs_free_path(path);
 
+	/*
+	 * we might have some extents allocated but more delalloc past those
+	 * extents. so, we trust isize unless the start of the last extent is
+	 * beyond isize
+	 */
+	if (last < isize) {
+		last = (u64)-1;
+		last_for_get_extent = isize;
+	}
+
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
 			 &cached_state, GFP_NOFS);
-	em = get_extent(inode, NULL, 0, off, max - off, 0);
+
+	em = get_extent_skip_holes(inode, off, last_for_get_extent,
+				   get_extent);
 	if (!em)
 		goto out;
 	if (IS_ERR(em)) {
@@ -2973,19 +3046,14 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	}
 
 	while (!end) {
-		hole = 0;
-		off = em->start + em->len;
+		off = extent_map_end(em);
 		if (off >= max)
 			end = 1;
 
-		if (em->block_start == EXTENT_MAP_HOLE) {
-			hole = 1;
-			goto next;
-		}
-
 		em_start = em->start;
 		em_len = em->len;
-
+		em_end = extent_map_end(em);
+		emflags = em->flags;
 		disko = 0;
 		flags = 0;
 
@@ -3004,37 +3072,29 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
 			flags |= FIEMAP_EXTENT_ENCODED;
 
-next:
-		emflags = em->flags;
 		free_extent_map(em);
 		em = NULL;
-		if (!end) {
-			em = get_extent(inode, NULL, 0, off, max - off, 0);
-			if (!em)
-				goto out;
-			if (IS_ERR(em)) {
-				ret = PTR_ERR(em);
-				goto out;
-			}
-			emflags = em->flags;
-		}
-
-		if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) {
+		if ((em_start >= last) || em_len == (u64)-1 ||
+		   (last == (u64)-1 && isize <= em_end)) {
 			flags |= FIEMAP_EXTENT_LAST;
 			end = 1;
 		}
 
-		if (em_start == last) {
+		/* now scan forward to see if this is really the last extent. */
+		em = get_extent_skip_holes(inode, off, last_for_get_extent,
+					   get_extent);
+		if (IS_ERR(em)) {
+			ret = PTR_ERR(em);
+			goto out;
+		}
+		if (!em) {
 			flags |= FIEMAP_EXTENT_LAST;
 			end = 1;
 		}
-
-		if (!hole) {
-			ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
-						      em_len, flags);
-			if (ret)
-				goto out_free;
-		}
+		ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
+					      em_len, flags);
+		if (ret)
+			goto out_free;
 	}
 out_free:
 	free_extent_map(em);
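
Note on the count_range_bits() change above: with contig == 0 the function
keeps summing matching bytes across any gaps up to search_end, while with
contig == 1 it stops at the first gap between matching ranges, so the
returned count describes one unbroken run beginning at *start. A minimal
sketch of a caller, assuming the kernel context of this patch (the helper
name find_contig_delalloc is hypothetical; EXTENT_DELALLOC is the existing
delalloc bit from extent_io.h):

/*
 * Hypothetical helper, for illustration only: measure one contiguous
 * run of delalloc bytes within [start, end].  count_range_bits() sets
 * range_start to the start of the first matching extent_state and,
 * because contig == 1, stops counting at the first hole it hits.
 */
static u64 find_contig_delalloc(struct inode *inode, u64 start, u64 end)
{
	u64 range_start = start;

	return count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
				end, end + 1 - start,
				EXTENT_DELALLOC, 1);
}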
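
Worked example for the sector round-up in get_extent_skip_holes(): the
expression len = (len + sectorsize - 1) & ~(sectorsize - 1) rounds len up
to the next multiple of sectorsize, which works because sectorsize is a
power of two. With sectorsize == 4096, len == 5000 becomes 8192 and
len == 4096 stays 4096, so each get_extent() lookup covers a whole number
of sectors.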