@@ -29,6 +29,7 @@ struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(int nr_extents)
 	if (ref) {
 		memset(ref, 0, sizeof(*ref));
 		atomic_set(&ref->usage, 1);
+		INIT_LIST_HEAD(&ref->list);
 	}
 	return ref;
 }
@@ -44,40 +45,21 @@ void btrfs_free_leaf_ref(struct btrfs_leaf_ref *ref)
 	}
 }
 
-static int comp_keys(struct btrfs_key *k1, struct btrfs_key *k2)
-{
-	if (k1->objectid > k2->objectid)
-		return 1;
-	if (k1->objectid < k2->objectid)
-		return -1;
-	if (k1->type > k2->type)
-		return 1;
-	if (k1->type < k2->type)
-		return -1;
-	if (k1->offset > k2->offset)
-		return 1;
-	if (k1->offset < k2->offset)
-		return -1;
-	return 0;
-}
-
-static struct rb_node *tree_insert(struct rb_root *root, struct btrfs_key *key,
+static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
 				   struct rb_node *node)
 {
 	struct rb_node ** p = &root->rb_node;
 	struct rb_node * parent = NULL;
 	struct btrfs_leaf_ref *entry;
-	int ret;
 
 	while(*p) {
 		parent = *p;
 		entry = rb_entry(parent, struct btrfs_leaf_ref, rb_node);
 		WARN_ON(!entry->in_tree);
 
-		ret = comp_keys(key, &entry->key);
-		if (ret < 0)
+		if (bytenr < entry->bytenr)
 			p = &(*p)->rb_left;
-		else if (ret > 0)
+		else if (bytenr > entry->bytenr)
 			p = &(*p)->rb_right;
 		else
 			return parent;
@@ -90,20 +72,18 @@ static struct rb_node *tree_insert(struct rb_root *root, struct btrfs_key *key,
 	return NULL;
 }
 
-static struct rb_node *tree_search(struct rb_root *root, struct btrfs_key *key)
+static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
 {
 	struct rb_node * n = root->rb_node;
 	struct btrfs_leaf_ref *entry;
-	int ret;
 
 	while(n) {
 		entry = rb_entry(n, struct btrfs_leaf_ref, rb_node);
 		WARN_ON(!entry->in_tree);
 
-		ret = comp_keys(key, &entry->key);
-		if (ret < 0)
+		if (bytenr < entry->bytenr)
 			n = n->rb_left;
-		else if (ret > 0)
+		else if (bytenr > entry->bytenr)
 			n = n->rb_right;
 		else
 			return n;
@@ -122,11 +102,11 @@ int btrfs_remove_leaf_refs(struct btrfs_root *root)
 
 	spin_lock(&tree->lock);
 	while(!btrfs_leaf_ref_tree_empty(tree)) {
-		tree->last = NULL;
 		rb = rb_first(&tree->root);
 		ref = rb_entry(rb, struct btrfs_leaf_ref, rb_node);
 		rb_erase(&ref->rb_node, &tree->root);
 		ref->in_tree = 0;
+		list_del_init(&ref->list);
 
 		spin_unlock(&tree->lock);
 
@@ -140,7 +120,7 @@ int btrfs_remove_leaf_refs(struct btrfs_root *root)
 }
 
 struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root,
-					     struct btrfs_key *key)
+					     u64 bytenr)
 {
 	struct rb_node *rb;
 	struct btrfs_leaf_ref *ref = NULL;
@@ -150,15 +130,9 @@ struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root,
 		return NULL;
 
 	spin_lock(&tree->lock);
-	if (tree->last && comp_keys(key, &tree->last->key) == 0) {
-		ref = tree->last;
-	} else {
-		rb = tree_search(&tree->root, key);
-		if (rb) {
-			ref = rb_entry(rb, struct btrfs_leaf_ref, rb_node);
-			tree->last = ref;
-		}
-	}
+	rb = tree_search(&tree->root, bytenr);
+	if (rb)
+		ref = rb_entry(rb, struct btrfs_leaf_ref, rb_node);
 	if (ref)
 		atomic_inc(&ref->usage);
 	spin_unlock(&tree->lock);
@@ -171,21 +145,17 @@ int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
 	struct rb_node *rb;
 	size_t size = btrfs_leaf_ref_size(ref->nritems);
 	struct btrfs_leaf_ref_tree *tree = root->ref_tree;
-	struct btrfs_transaction *trans = root->fs_info->running_transaction;
 
 	spin_lock(&tree->lock);
-	rb = tree_insert(&tree->root, &ref->key, &ref->rb_node);
+	rb = tree_insert(&tree->root, ref->bytenr, &ref->rb_node);
 	if (rb) {
 		ret = -EEXIST;
 	} else {
 		spin_lock(&root->fs_info->ref_cache_lock);
 		root->fs_info->total_ref_cache_size += size;
-		if (trans && tree->generation == trans->transid)
-			root->fs_info->running_ref_cache_size += size;
 		spin_unlock(&root->fs_info->ref_cache_lock);
-
-		tree->last = ref;
 		atomic_inc(&ref->usage);
+		list_add_tail(&ref->list, &tree->list);
 	}
 	spin_unlock(&tree->lock);
 	return ret;
@@ -195,28 +165,17 @@ int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
 {
 	size_t size = btrfs_leaf_ref_size(ref->nritems);
 	struct btrfs_leaf_ref_tree *tree = root->ref_tree;
-	struct btrfs_transaction *trans = root->fs_info->running_transaction;
 
 	BUG_ON(!ref->in_tree);
 	spin_lock(&tree->lock);
 
 	spin_lock(&root->fs_info->ref_cache_lock);
 	root->fs_info->total_ref_cache_size -= size;
-	if (trans && tree->generation == trans->transid)
-		root->fs_info->running_ref_cache_size -= size;
 	spin_unlock(&root->fs_info->ref_cache_lock);
 
-	if (tree->last == ref) {
-		struct rb_node *next = rb_next(&ref->rb_node);
-		if (next) {
-			tree->last = rb_entry(next, struct btrfs_leaf_ref,
-					      rb_node);
-		} else
-			tree->last = NULL;
-	}
-
 	rb_erase(&ref->rb_node, &tree->root);
 	ref->in_tree = 0;
+	list_del_init(&ref->list);
 
 	spin_unlock(&tree->lock);
 
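For reference, here is a minimal standalone sketch of the bytenr-indexed rbtree insert that this patch switches to, assembled from the hunks above. struct example_ref is a hypothetical stand-in for struct btrfs_leaf_ref, and the final linking step is assumed to be the standard rb_link_node()/rb_insert_color() idiom, which the hunk context elides; this is an illustration under those assumptions, not part of the patch.

/* Kernel-context sketch: index nodes by a single u64 instead of a
 * three-field key comparison. */
#include <linux/rbtree.h>
#include <linux/types.h>

struct example_ref {
	struct rb_node rb_node;
	u64 bytenr;
};

/* Walk down from the root comparing the u64 key at each step; on the
 * first NULL child, link and recolor the new node. Returns the existing
 * node on collision, NULL on successful insert. */
static struct rb_node *example_insert(struct rb_root *root, u64 bytenr,
				      struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct example_ref *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct example_ref, rb_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return parent;	/* duplicate bytenr already cached */
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}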