  1. /*
  2. * Copyright (C) 2008 Oracle. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public
  6. * License v2 as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11. * General Public License for more details.
  12. *
  13. * You should have received a copy of the GNU General Public
  14. * License along with this program; if not, write to the
  15. * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  16. * Boston, MA 021110-1307, USA.
  17. */
  18. #include <linux/sched.h>
  19. #include "ctree.h"
  20. #include "ref-cache.h"
  21. #include "transaction.h"
  22. /*
  23. * leaf refs are used to cache the information about which extents
  24. * a given leaf has references on. This allows us to process that leaf
  25. * in btrfs_drop_snapshot without needing to read it back from disk.
  26. */
  27. /*
  28. * kmalloc a leaf reference struct and update the counters for the
  29. * total ref cache size
  30. */
  31. struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root,
  32. int nr_extents)
  33. {
  34. struct btrfs_leaf_ref *ref;
  35. size_t size = btrfs_leaf_ref_size(nr_extents);
  36. ref = kmalloc(size, GFP_NOFS);
  37. if (ref) {
  38. spin_lock(&root->fs_info->ref_cache_lock);
  39. root->fs_info->total_ref_cache_size += size;
  40. spin_unlock(&root->fs_info->ref_cache_lock);
  41. memset(ref, 0, sizeof(*ref));
  42. atomic_set(&ref->usage, 1);
  43. INIT_LIST_HEAD(&ref->list);
  44. }
  45. return ref;
  46. }
  47. /*
  48. * free a leaf reference struct and update the counters for the
  49. * total ref cache size
  50. */
  51. void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
  52. {
  53. if (!ref)
  54. return;
  55. WARN_ON(atomic_read(&ref->usage) == 0);
  56. if (atomic_dec_and_test(&ref->usage)) {
  57. size_t size = btrfs_leaf_ref_size(ref->nritems);
  58. BUG_ON(ref->in_tree);
  59. kfree(ref);
  60. spin_lock(&root->fs_info->ref_cache_lock);
  61. root->fs_info->total_ref_cache_size -= size;
  62. spin_unlock(&root->fs_info->ref_cache_lock);
  63. }
  64. }
  65. static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
  66. struct rb_node *node)
  67. {
  68. struct rb_node **p = &root->rb_node;
  69. struct rb_node *parent = NULL;
  70. struct btrfs_leaf_ref *entry;
  71. while (*p) {
  72. parent = *p;
  73. entry = rb_entry(parent, struct btrfs_leaf_ref, rb_node);
  74. if (bytenr < entry->bytenr)
  75. p = &(*p)->rb_left;
  76. else if (bytenr > entry->bytenr)
  77. p = &(*p)->rb_right;
  78. else
  79. return parent;
  80. }
  81. entry = rb_entry(node, struct btrfs_leaf_ref, rb_node);
  82. rb_link_node(node, parent, p);
  83. rb_insert_color(node, root);
  84. return NULL;
  85. }
  86. static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
  87. {
  88. struct rb_node *n = root->rb_node;
  89. struct btrfs_leaf_ref *entry;
  90. while (n) {
  91. entry = rb_entry(n, struct btrfs_leaf_ref, rb_node);
  92. WARN_ON(!entry->in_tree);
  93. if (bytenr < entry->bytenr)
  94. n = n->rb_left;
  95. else if (bytenr > entry->bytenr)
  96. n = n->rb_right;
  97. else
  98. return n;
  99. }
  100. return NULL;
  101. }
/*
 * Remove and free every leaf ref on the tree's LRU list whose root_gen
 * is <= max_root_gen.  With @shared set, the fs-wide shared_ref_tree is
 * pruned instead of this root's private tree.
 *
 * The list is generation-ordered, so the walk stops at the first entry
 * that is too new.  The tree lock is dropped around each free so other
 * CPUs are not starved.  Always returns 0.
 */
int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen,
			   int shared)
{
	struct btrfs_leaf_ref *ref = NULL;
	struct btrfs_leaf_ref_tree *tree = root->ref_tree;

	if (shared)
		tree = &root->fs_info->shared_ref_tree;
	/* a root may have no private ref tree at all */
	if (!tree)
		return 0;

	spin_lock(&tree->lock);
	while (!list_empty(&tree->list)) {
		/* oldest generation first */
		ref = list_entry(tree->list.next, struct btrfs_leaf_ref, list);
		BUG_ON(ref->tree != tree);
		if (ref->root_gen > max_root_gen)
			break;
		/*
		 * xchg atomically claims the ref: if in_tree was already 0,
		 * someone else is concurrently removing it, so yield the
		 * lock briefly and re-check the list head.
		 */
		if (!xchg(&ref->in_tree, 0)) {
			cond_resched_lock(&tree->lock);
			continue;
		}

		rb_erase(&ref->rb_node, &tree->root);
		list_del_init(&ref->list);

		/* drop the lock while freeing; the ref is ours now */
		spin_unlock(&tree->lock);
		btrfs_free_leaf_ref(root, ref);
		cond_resched();
		spin_lock(&tree->lock);
	}
	spin_unlock(&tree->lock);
	return 0;
}
  131. /*
  132. * find the leaf ref for a given extent. This returns the ref struct with
  133. * a usage reference incremented
  134. */
  135. struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root,
  136. u64 bytenr)
  137. {
  138. struct rb_node *rb;
  139. struct btrfs_leaf_ref *ref = NULL;
  140. struct btrfs_leaf_ref_tree *tree = root->ref_tree;
  141. again:
  142. if (tree) {
  143. spin_lock(&tree->lock);
  144. rb = tree_search(&tree->root, bytenr);
  145. if (rb)
  146. ref = rb_entry(rb, struct btrfs_leaf_ref, rb_node);
  147. if (ref)
  148. atomic_inc(&ref->usage);
  149. spin_unlock(&tree->lock);
  150. if (ref)
  151. return ref;
  152. }
  153. if (tree != &root->fs_info->shared_ref_tree) {
  154. tree = &root->fs_info->shared_ref_tree;
  155. goto again;
  156. }
  157. return NULL;
  158. }
  159. /*
  160. * add a fully filled in leaf ref struct
  161. * remove all the refs older than a given root generation
  162. */
  163. int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref,
  164. int shared)
  165. {
  166. int ret = 0;
  167. struct rb_node *rb;
  168. struct btrfs_leaf_ref_tree *tree = root->ref_tree;
  169. if (shared)
  170. tree = &root->fs_info->shared_ref_tree;
  171. spin_lock(&tree->lock);
  172. rb = tree_insert(&tree->root, ref->bytenr, &ref->rb_node);
  173. if (rb) {
  174. ret = -EEXIST;
  175. } else {
  176. atomic_inc(&ref->usage);
  177. ref->tree = tree;
  178. ref->in_tree = 1;
  179. list_add_tail(&ref->list, &tree->list);
  180. }
  181. spin_unlock(&tree->lock);
  182. return ret;
  183. }
  184. /*
  185. * remove a single leaf ref from the tree. This drops the ref held by the tree
  186. * only
  187. */
/*
 * Remove a single leaf ref from whichever tree holds it and drop the
 * tree's usage reference (the caller's own reference, if any, is
 * untouched).
 *
 * The lockless xchg on in_tree atomically claims removal rights; losing
 * the race to another remover is fine and just returns 0.  ref->tree is
 * only read after the claim succeeds, while the entry is still safely
 * linked.  Always returns 0.
 */
int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
{
	struct btrfs_leaf_ref_tree *tree;

	if (!xchg(&ref->in_tree, 0))
		return 0;

	tree = ref->tree;
	spin_lock(&tree->lock);
	rb_erase(&ref->rb_node, &tree->root);
	list_del_init(&ref->list);
	spin_unlock(&tree->lock);

	/* drop the reference the tree held (taken in btrfs_add_leaf_ref) */
	btrfs_free_leaf_ref(root, ref);
	return 0;
}