/* ref-cache.c — btrfs leaf reference cache */
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
  18. #include <linux/sched.h>
  19. #include <linux/sort.h>
  20. #include "ctree.h"
  21. #include "ref-cache.h"
  22. #include "transaction.h"
  23. /*
  24. * leaf refs are used to cache the information about which extents
  25. * a given leaf has references on. This allows us to process that leaf
  26. * in btrfs_drop_snapshot without needing to read it back from disk.
  27. */
  28. /*
  29. * kmalloc a leaf reference struct and update the counters for the
  30. * total ref cache size
  31. */
  32. struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root,
  33. int nr_extents)
  34. {
  35. struct btrfs_leaf_ref *ref;
  36. size_t size = btrfs_leaf_ref_size(nr_extents);
  37. ref = kmalloc(size, GFP_NOFS);
  38. if (ref) {
  39. spin_lock(&root->fs_info->ref_cache_lock);
  40. root->fs_info->total_ref_cache_size += size;
  41. spin_unlock(&root->fs_info->ref_cache_lock);
  42. memset(ref, 0, sizeof(*ref));
  43. atomic_set(&ref->usage, 1);
  44. INIT_LIST_HEAD(&ref->list);
  45. }
  46. return ref;
  47. }
  48. /*
  49. * free a leaf reference struct and update the counters for the
  50. * total ref cache size
  51. */
  52. void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
  53. {
  54. if (!ref)
  55. return;
  56. WARN_ON(atomic_read(&ref->usage) == 0);
  57. if (atomic_dec_and_test(&ref->usage)) {
  58. size_t size = btrfs_leaf_ref_size(ref->nritems);
  59. BUG_ON(ref->in_tree);
  60. kfree(ref);
  61. spin_lock(&root->fs_info->ref_cache_lock);
  62. root->fs_info->total_ref_cache_size -= size;
  63. spin_unlock(&root->fs_info->ref_cache_lock);
  64. }
  65. }
  66. static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
  67. struct rb_node *node)
  68. {
  69. struct rb_node **p = &root->rb_node;
  70. struct rb_node *parent = NULL;
  71. struct btrfs_leaf_ref *entry;
  72. while (*p) {
  73. parent = *p;
  74. entry = rb_entry(parent, struct btrfs_leaf_ref, rb_node);
  75. if (bytenr < entry->bytenr)
  76. p = &(*p)->rb_left;
  77. else if (bytenr > entry->bytenr)
  78. p = &(*p)->rb_right;
  79. else
  80. return parent;
  81. }
  82. entry = rb_entry(node, struct btrfs_leaf_ref, rb_node);
  83. rb_link_node(node, parent, p);
  84. rb_insert_color(node, root);
  85. return NULL;
  86. }
  87. static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
  88. {
  89. struct rb_node *n = root->rb_node;
  90. struct btrfs_leaf_ref *entry;
  91. while (n) {
  92. entry = rb_entry(n, struct btrfs_leaf_ref, rb_node);
  93. WARN_ON(!entry->in_tree);
  94. if (bytenr < entry->bytenr)
  95. n = n->rb_left;
  96. else if (bytenr > entry->bytenr)
  97. n = n->rb_right;
  98. else
  99. return n;
  100. }
  101. return NULL;
  102. }
  103. int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen,
  104. int shared)
  105. {
  106. struct btrfs_leaf_ref *ref = NULL;
  107. struct btrfs_leaf_ref_tree *tree = root->ref_tree;
  108. if (shared)
  109. tree = &root->fs_info->shared_ref_tree;
  110. if (!tree)
  111. return 0;
  112. spin_lock(&tree->lock);
  113. while (!list_empty(&tree->list)) {
  114. ref = list_entry(tree->list.next, struct btrfs_leaf_ref, list);
  115. BUG_ON(ref->tree != tree);
  116. if (ref->root_gen > max_root_gen)
  117. break;
  118. if (!xchg(&ref->in_tree, 0)) {
  119. cond_resched_lock(&tree->lock);
  120. continue;
  121. }
  122. rb_erase(&ref->rb_node, &tree->root);
  123. list_del_init(&ref->list);
  124. spin_unlock(&tree->lock);
  125. btrfs_free_leaf_ref(root, ref);
  126. cond_resched();
  127. spin_lock(&tree->lock);
  128. }
  129. spin_unlock(&tree->lock);
  130. return 0;
  131. }
  132. /*
  133. * find the leaf ref for a given extent. This returns the ref struct with
  134. * a usage reference incremented
  135. */
  136. struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root,
  137. u64 bytenr)
  138. {
  139. struct rb_node *rb;
  140. struct btrfs_leaf_ref *ref = NULL;
  141. struct btrfs_leaf_ref_tree *tree = root->ref_tree;
  142. again:
  143. if (tree) {
  144. spin_lock(&tree->lock);
  145. rb = tree_search(&tree->root, bytenr);
  146. if (rb)
  147. ref = rb_entry(rb, struct btrfs_leaf_ref, rb_node);
  148. if (ref)
  149. atomic_inc(&ref->usage);
  150. spin_unlock(&tree->lock);
  151. if (ref)
  152. return ref;
  153. }
  154. if (tree != &root->fs_info->shared_ref_tree) {
  155. tree = &root->fs_info->shared_ref_tree;
  156. goto again;
  157. }
  158. return NULL;
  159. }
  160. /*
  161. * add a fully filled in leaf ref struct
  162. * remove all the refs older than a given root generation
  163. */
  164. int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref,
  165. int shared)
  166. {
  167. int ret = 0;
  168. struct rb_node *rb;
  169. struct btrfs_leaf_ref_tree *tree = root->ref_tree;
  170. if (shared)
  171. tree = &root->fs_info->shared_ref_tree;
  172. spin_lock(&tree->lock);
  173. rb = tree_insert(&tree->root, ref->bytenr, &ref->rb_node);
  174. if (rb) {
  175. ret = -EEXIST;
  176. } else {
  177. atomic_inc(&ref->usage);
  178. ref->tree = tree;
  179. ref->in_tree = 1;
  180. list_add_tail(&ref->list, &tree->list);
  181. }
  182. spin_unlock(&tree->lock);
  183. return ret;
  184. }
  185. /*
  186. * remove a single leaf ref from the tree. This drops the ref held by the tree
  187. * only
  188. */
  189. int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
  190. {
  191. struct btrfs_leaf_ref_tree *tree;
  192. if (!xchg(&ref->in_tree, 0))
  193. return 0;
  194. tree = ref->tree;
  195. spin_lock(&tree->lock);
  196. rb_erase(&ref->rb_node, &tree->root);
  197. list_del_init(&ref->list);
  198. spin_unlock(&tree->lock);
  199. btrfs_free_leaf_ref(root, ref);
  200. return 0;
  201. }