@@ -182,6 +182,7 @@ static int link_free_space(struct btrfs_block_group_cache *block_group,
 	int ret = 0;
 
 
+	BUG_ON(!info->bytes);
 	ret = tree_insert_offset(&block_group->free_space_offset, info->offset,
				 &info->offset_index);
 	if (ret)
@@ -195,14 +196,23 @@ static int link_free_space(struct btrfs_block_group_cache *block_group,
 	return ret;
 }
 
-static int __btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
-				  u64 offset, u64 bytes)
+int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+			 u64 offset, u64 bytes)
 {
 	struct btrfs_free_space *right_info;
 	struct btrfs_free_space *left_info;
 	struct btrfs_free_space *info = NULL;
 	int ret = 0;
 
+	info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
+	if (!info)
+		return -ENOMEM;
+
+	info->offset = offset;
+	info->bytes = bytes;
+
+	spin_lock(&block_group->tree_lock);
+
 	/*
 	 * first we want to see if there is free space adjacent to the range we
 	 * are adding, if there is remove that struct and add a new one to
@@ -215,42 +225,23 @@ static int __btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 
 	if (right_info) {
 		unlink_free_space(block_group, right_info);
-		info = right_info;
-		info->offset = offset;
-		info->bytes += bytes;
+		info->bytes += right_info->bytes;
+		kfree(right_info);
 	}
 
 	if (left_info && left_info->offset + left_info->bytes == offset) {
 		unlink_free_space(block_group, left_info);
-
-		if (info) {
-			info->offset = left_info->offset;
-			info->bytes += left_info->bytes;
-			kfree(left_info);
-		} else {
-			info = left_info;
-			info->bytes += bytes;
-		}
-	}
-
-	if (info) {
-		ret = link_free_space(block_group, info);
-		if (ret)
-			kfree(info);
-		goto out;
+		info->offset = left_info->offset;
+		info->bytes += left_info->bytes;
+		kfree(left_info);
 	}
 
-	info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
-	if (!info)
-		return -ENOMEM;
-
-	info->offset = offset;
-	info->bytes = bytes;
-
 	ret = link_free_space(block_group, info);
 	if (ret)
 		kfree(info);
-out:
+
+	spin_unlock(&block_group->tree_lock);
+
 	if (ret) {
 		printk(KERN_ERR "btrfs: unable to add free space :%d\n", ret);
 		if (ret == -EEXIST)
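
The reworked add path reads better out of diff context: the new entry is allocated and filled in before the lock is taken, and a neighbour touching either end of the range is simply unlinked and absorbed, so a single link_free_space() call suffices. What follows is a minimal userspace sketch of that coalescing logic, not kernel code: the fs_entry struct, the singly linked list standing in for the offset/bytes rbtrees, and every helper name are illustrative stand-ins.

#include <stdio.h>
#include <stdlib.h>

/* hypothetical stand-in for btrfs_free_space; a list replaces the rbtrees */
struct fs_entry {
	unsigned long long offset;
	unsigned long long bytes;
	struct fs_entry *next;
};

static struct fs_entry *free_list;

/* unlink and return the entry starting exactly at @offset, if any */
static struct fs_entry *pop_at(unsigned long long offset)
{
	struct fs_entry **p;

	for (p = &free_list; *p; p = &(*p)->next) {
		if ((*p)->offset == offset) {
			struct fs_entry *e = *p;
			*p = e->next;	/* unlink_free_space() analogue */
			return e;
		}
	}
	return NULL;
}

/* unlink and return the entry ending exactly at @offset, if any */
static struct fs_entry *pop_ending_at(unsigned long long offset)
{
	struct fs_entry **p;

	for (p = &free_list; *p; p = &(*p)->next) {
		if ((*p)->offset + (*p)->bytes == offset) {
			struct fs_entry *e = *p;
			*p = e->next;
			return e;
		}
	}
	return NULL;
}

/* mirrors the reworked add path: allocate first, then absorb neighbours */
static int add_free_space(unsigned long long offset, unsigned long long bytes)
{
	struct fs_entry *info, *right, *left;

	info = calloc(1, sizeof(*info));
	if (!info)
		return -1;
	info->offset = offset;
	info->bytes = bytes;

	right = pop_at(offset + bytes);		/* starts where we end */
	if (right) {
		info->bytes += right->bytes;
		free(right);
	}

	left = pop_ending_at(offset);		/* ends where we start */
	if (left) {
		info->offset = left->offset;
		info->bytes += left->bytes;
		free(left);
	}

	info->next = free_list;			/* link_free_space() analogue */
	free_list = info;
	return 0;
}

int main(void)
{
	add_free_space(0, 4096);
	add_free_space(8192, 4096);
	add_free_space(4096, 4096);	/* bridges the two ranges */

	for (struct fs_entry *e = free_list; e; e = e->next)
		printf("free: offset=%llu bytes=%llu\n", e->offset, e->bytes);
	return 0;
}

Adding 0..4k and 8k..12k and then the bridging 4k..8k range collapses the list to a single 12k entry, which is the behaviour the patched btrfs_add_free_space() preserves while holding tree_lock across the whole merge.
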
@@ -260,17 +251,16 @@ out:
 	return ret;
 }
 
-static int
-__btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
-			  u64 offset, u64 bytes)
+int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
+			    u64 offset, u64 bytes)
 {
 	struct btrfs_free_space *info;
 	int ret = 0;
 
-	BUG_ON(!block_group->cached);
+	spin_lock(&block_group->tree_lock);
+
 	info = tree_search_offset(&block_group->free_space_offset, offset, 0,
				  1);
-
 	if (info && info->offset == offset) {
 		if (info->bytes < bytes) {
 			printk(KERN_ERR "Found free space at %llu, size %llu,"
@@ -280,12 +270,14 @@ __btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
			       (unsigned long long)bytes);
 			WARN_ON(1);
 			ret = -EINVAL;
+			spin_unlock(&block_group->tree_lock);
 			goto out;
 		}
 		unlink_free_space(block_group, info);
 
 		if (info->bytes == bytes) {
 			kfree(info);
+			spin_unlock(&block_group->tree_lock);
 			goto out;
 		}
 
@@ -293,6 +285,7 @@ __btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
 		info->bytes -= bytes;
 
 		ret = link_free_space(block_group, info);
+		spin_unlock(&block_group->tree_lock);
 		BUG_ON(ret);
 	} else if (info && info->offset < offset &&
		   info->offset + info->bytes >= offset + bytes) {
@@ -318,14 +311,15 @@ __btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
 		 */
 			kfree(info);
 		}
-
+		spin_unlock(&block_group->tree_lock);
 		/* step two, insert a new info struct to cover anything
 		 * before the hole
 		 */
-		ret = __btrfs_add_free_space(block_group, old_start,
-					     offset - old_start);
+		ret = btrfs_add_free_space(block_group, old_start,
+					   offset - old_start);
 		BUG_ON(ret);
 	} else {
+		spin_unlock(&block_group->tree_lock);
 		if (!info) {
 			printk(KERN_ERR "couldn't find space %llu to free\n",
			       (unsigned long long)offset);
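
Note the ordering in the hole-punch branch above: tree_lock is dropped before step two because btrfs_add_free_space() now takes the same spinlock internally, and kernel spinlocks are not recursive. A toy userspace illustration of the pattern, using a pthread mutex purely as a stand-in for the spinlock; all names here are invented for the example.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

/* takes the lock on its own, like the patched btrfs_add_free_space() */
static void add_free_space(void)
{
	pthread_mutex_lock(&tree_lock);
	/* ... insert the entry covering the space before the hole ... */
	pthread_mutex_unlock(&tree_lock);
}

static void remove_middle(void)
{
	pthread_mutex_lock(&tree_lock);
	/* ... step one: trim or split the existing entry ... */
	pthread_mutex_unlock(&tree_lock);	/* drop before re-adding */
	add_free_space();			/* step two re-takes the lock */
}

int main(void)
{
	remove_middle();	/* completes; holding the lock here would deadlock */
	printf("no self-deadlock\n");
	return 0;
}
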
@@ -344,50 +338,6 @@ out:
 	return ret;
 }
 
-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
-			 u64 offset, u64 bytes)
-{
-	int ret;
-
-	mutex_lock(&block_group->alloc_mutex);
-	ret = __btrfs_add_free_space(block_group, offset, bytes);
-	mutex_unlock(&block_group->alloc_mutex);
-
-	return ret;
-}
-
-int btrfs_add_free_space_lock(struct btrfs_block_group_cache *block_group,
-			      u64 offset, u64 bytes)
-{
-	int ret;
-
-	ret = __btrfs_add_free_space(block_group, offset, bytes);
-
-	return ret;
-}
-
-int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
-			    u64 offset, u64 bytes)
-{
-	int ret = 0;
-
-	mutex_lock(&block_group->alloc_mutex);
-	ret = __btrfs_remove_free_space(block_group, offset, bytes);
-	mutex_unlock(&block_group->alloc_mutex);
-
-	return ret;
-}
-
-int btrfs_remove_free_space_lock(struct btrfs_block_group_cache *block_group,
-				 u64 offset, u64 bytes)
-{
-	int ret;
-
-	ret = __btrfs_remove_free_space(block_group, offset, bytes);
-
-	return ret;
-}
-
 void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
			   u64 bytes)
 {
@@ -426,63 +376,44 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
 	struct btrfs_free_space *info;
 	struct rb_node *node;
 
-	mutex_lock(&block_group->alloc_mutex);
+	spin_lock(&block_group->tree_lock);
 	while ((node = rb_last(&block_group->free_space_bytes)) != NULL) {
 		info = rb_entry(node, struct btrfs_free_space, bytes_index);
 		unlink_free_space(block_group, info);
 		kfree(info);
 		if (need_resched()) {
-			mutex_unlock(&block_group->alloc_mutex);
+			spin_unlock(&block_group->tree_lock);
 			cond_resched();
-			mutex_lock(&block_group->alloc_mutex);
+			spin_lock(&block_group->tree_lock);
 		}
 	}
-	mutex_unlock(&block_group->alloc_mutex);
-}
-
-#if 0
-static struct btrfs_free_space *btrfs_find_free_space_offset(struct
-						  btrfs_block_group_cache
-						  *block_group, u64 offset,
-						  u64 bytes)
-{
-	struct btrfs_free_space *ret;
-
-	mutex_lock(&block_group->alloc_mutex);
-	ret = tree_search_offset(&block_group->free_space_offset, offset,
-				 bytes, 0);
-	mutex_unlock(&block_group->alloc_mutex);
-
-	return ret;
+	spin_unlock(&block_group->tree_lock);
 }
 
-static struct btrfs_free_space *btrfs_find_free_space_bytes(struct
-						btrfs_block_group_cache
-						*block_group, u64 offset,
-						u64 bytes)
+u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
+			       u64 offset, u64 bytes, u64 empty_size)
 {
-	struct btrfs_free_space *ret;
-
-	mutex_lock(&block_group->alloc_mutex);
-
-	ret = tree_search_bytes(&block_group->free_space_bytes, offset, bytes);
-	mutex_unlock(&block_group->alloc_mutex);
-
-	return ret;
-}
-#endif
-
-struct btrfs_free_space *btrfs_find_free_space(struct btrfs_block_group_cache
-					       *block_group, u64 offset,
-					       u64 bytes)
-{
-	struct btrfs_free_space *ret = NULL;
+	struct btrfs_free_space *entry = NULL;
+	u64 ret = 0;
 
-	ret = tree_search_offset(&block_group->free_space_offset, offset,
-				 bytes, 1);
-	if (!ret)
-		ret = tree_search_bytes(&block_group->free_space_bytes,
-					offset, bytes);
+	spin_lock(&block_group->tree_lock);
+	entry = tree_search_offset(&block_group->free_space_offset, offset,
+				   bytes + empty_size, 1);
+	if (!entry)
+		entry = tree_search_bytes(&block_group->free_space_bytes,
+					  offset, bytes + empty_size);
+	if (entry) {
+		unlink_free_space(block_group, entry);
+		ret = entry->offset;
+		entry->offset += bytes;
+		entry->bytes -= bytes;
+
+		if (!entry->bytes)
+			kfree(entry);
+		else
+			link_free_space(block_group, entry);
+	}
+	spin_unlock(&block_group->tree_lock);
 
 	return ret;
 }
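
btrfs_find_space_for_alloc() changes the search API from "hand back a struct for the caller to manipulate under a mutex" to "carve the space out under tree_lock and return its start offset", with 0 doubling as the not-found value (block group offsets are absolute disk byte offsets, so a genuine hit at 0 is presumably not expected). Note that the matching entry must cover bytes + empty_size, but only bytes are actually carved off. A hypothetical caller sketch; alloc_from_group() and its parameters are invented for illustration, only btrfs_find_space_for_alloc() is from the patch.

/* hypothetical kernel-context helper, not part of this patch */
static int alloc_from_group(struct btrfs_block_group_cache *bg,
			    u64 search_start, u64 num_bytes, u64 empty_size,
			    u64 *found)
{
	u64 start;

	start = btrfs_find_space_for_alloc(bg, search_start, num_bytes,
					   empty_size);
	if (!start)
		return -ENOSPC;	/* nothing big enough; try another group */

	*found = start;	/* region [start, start + num_bytes) is now ours */
	return 0;
}
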