/* ref-cache.c */
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
  18. #include <linux/sched.h>
  19. #include <linux/slab.h>
  20. #include <linux/sort.h>
  21. #include "ctree.h"
  22. #include "ref-cache.h"
  23. #include "transaction.h"
/*
 * Leaf refs cache, per leaf, which extents that leaf holds references
 * on.  This lets btrfs_drop_snapshot process a leaf without reading it
 * back from disk.
 */
  29. /*
  30. * kmalloc a leaf reference struct and update the counters for the
  31. * total ref cache size
  32. */
  33. struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root,
  34. int nr_extents)
  35. {
  36. struct btrfs_leaf_ref *ref;
  37. size_t size = btrfs_leaf_ref_size(nr_extents);
  38. ref = kmalloc(size, GFP_NOFS);
  39. if (ref) {
  40. spin_lock(&root->fs_info->ref_cache_lock);
  41. root->fs_info->total_ref_cache_size += size;
  42. spin_unlock(&root->fs_info->ref_cache_lock);
  43. memset(ref, 0, sizeof(*ref));
  44. atomic_set(&ref->usage, 1);
  45. INIT_LIST_HEAD(&ref->list);
  46. }
  47. return ref;
  48. }
/*
 * Drop one usage reference on @ref; when the count hits zero, free the
 * struct and subtract its size from the fs-wide ref cache total.
 * Safe to call with @ref == NULL.
 */
void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
{
	if (!ref)
		return;

	WARN_ON(atomic_read(&ref->usage) == 0);
	if (atomic_dec_and_test(&ref->usage)) {
		/* size must be computed before kfree invalidates ref */
		size_t size = btrfs_leaf_ref_size(ref->nritems);

		/* a ref still linked into a tree must never be freed */
		BUG_ON(ref->in_tree);
		kfree(ref);

		spin_lock(&root->fs_info->ref_cache_lock);
		root->fs_info->total_ref_cache_size -= size;
		spin_unlock(&root->fs_info->ref_cache_lock);
	}
}
  67. static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
  68. struct rb_node *node)
  69. {
  70. struct rb_node **p = &root->rb_node;
  71. struct rb_node *parent = NULL;
  72. struct btrfs_leaf_ref *entry;
  73. while (*p) {
  74. parent = *p;
  75. entry = rb_entry(parent, struct btrfs_leaf_ref, rb_node);
  76. if (bytenr < entry->bytenr)
  77. p = &(*p)->rb_left;
  78. else if (bytenr > entry->bytenr)
  79. p = &(*p)->rb_right;
  80. else
  81. return parent;
  82. }
  83. entry = rb_entry(node, struct btrfs_leaf_ref, rb_node);
  84. rb_link_node(node, parent, p);
  85. rb_insert_color(node, root);
  86. return NULL;
  87. }
  88. static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
  89. {
  90. struct rb_node *n = root->rb_node;
  91. struct btrfs_leaf_ref *entry;
  92. while (n) {
  93. entry = rb_entry(n, struct btrfs_leaf_ref, rb_node);
  94. WARN_ON(!entry->in_tree);
  95. if (bytenr < entry->bytenr)
  96. n = n->rb_left;
  97. else if (bytenr > entry->bytenr)
  98. n = n->rb_right;
  99. else
  100. return n;
  101. }
  102. return NULL;
  103. }
/*
 * Prune cached leaf refs whose root_gen is <= @max_root_gen.  The list
 * is kept in insertion (age) order, so the walk stops at the first ref
 * that is still too new.  With @shared set, the fs-wide shared tree is
 * pruned instead of this root's private tree.  Always returns 0.
 */
int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen,
			   int shared)
{
	struct btrfs_leaf_ref *ref = NULL;
	struct btrfs_leaf_ref_tree *tree = root->ref_tree;

	if (shared)
		tree = &root->fs_info->shared_ref_tree;
	if (!tree)
		return 0;

	spin_lock(&tree->lock);
	while (!list_empty(&tree->list)) {
		ref = list_entry(tree->list.next, struct btrfs_leaf_ref, list);
		BUG_ON(ref->tree != tree);
		if (ref->root_gen > max_root_gen)
			break;
		/*
		 * xchg claims the ref: only the caller that sees the old
		 * value 1 may unlink it; losing a race just retries after
		 * giving the lock a chance to breathe.
		 */
		if (!xchg(&ref->in_tree, 0)) {
			cond_resched_lock(&tree->lock);
			continue;
		}

		rb_erase(&ref->rb_node, &tree->root);
		list_del_init(&ref->list);

		/* drop the lock while freeing; the ref is already unlinked */
		spin_unlock(&tree->lock);
		btrfs_free_leaf_ref(root, ref);
		cond_resched();
		spin_lock(&tree->lock);
	}
	spin_unlock(&tree->lock);
	return 0;
}
  133. /*
  134. * find the leaf ref for a given extent. This returns the ref struct with
  135. * a usage reference incremented
  136. */
  137. struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root,
  138. u64 bytenr)
  139. {
  140. struct rb_node *rb;
  141. struct btrfs_leaf_ref *ref = NULL;
  142. struct btrfs_leaf_ref_tree *tree = root->ref_tree;
  143. again:
  144. if (tree) {
  145. spin_lock(&tree->lock);
  146. rb = tree_search(&tree->root, bytenr);
  147. if (rb)
  148. ref = rb_entry(rb, struct btrfs_leaf_ref, rb_node);
  149. if (ref)
  150. atomic_inc(&ref->usage);
  151. spin_unlock(&tree->lock);
  152. if (ref)
  153. return ref;
  154. }
  155. if (tree != &root->fs_info->shared_ref_tree) {
  156. tree = &root->fs_info->shared_ref_tree;
  157. goto again;
  158. }
  159. return NULL;
  160. }
  161. /*
  162. * add a fully filled in leaf ref struct
  163. * remove all the refs older than a given root generation
  164. */
  165. int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref,
  166. int shared)
  167. {
  168. int ret = 0;
  169. struct rb_node *rb;
  170. struct btrfs_leaf_ref_tree *tree = root->ref_tree;
  171. if (shared)
  172. tree = &root->fs_info->shared_ref_tree;
  173. spin_lock(&tree->lock);
  174. rb = tree_insert(&tree->root, ref->bytenr, &ref->rb_node);
  175. if (rb) {
  176. ret = -EEXIST;
  177. } else {
  178. atomic_inc(&ref->usage);
  179. ref->tree = tree;
  180. ref->in_tree = 1;
  181. list_add_tail(&ref->list, &tree->list);
  182. }
  183. spin_unlock(&tree->lock);
  184. return ret;
  185. }
  186. /*
  187. * remove a single leaf ref from the tree. This drops the ref held by the tree
  188. * only
  189. */
  190. int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
  191. {
  192. struct btrfs_leaf_ref_tree *tree;
  193. if (!xchg(&ref->in_tree, 0))
  194. return 0;
  195. tree = ref->tree;
  196. spin_lock(&tree->lock);
  197. rb_erase(&ref->rb_node, &tree->root);
  198. list_del_init(&ref->list);
  199. spin_unlock(&tree->lock);
  200. btrfs_free_leaf_ref(root, ref);
  201. return 0;
  202. }