/* extent_map.c */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include "ctree.h"
#include "extent_map.h"

static struct kmem_cache *extent_map_cache;

int __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("btrfs_extent_map",
			sizeof(struct extent_map), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}

void extent_map_exit(void)
{
	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
}
/**
 * extent_map_tree_init - initialize extent map tree
 * @tree:	tree to initialize
 *
 * Initialize the extent tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree)
{
	tree->map = RB_ROOT;
	INIT_LIST_HEAD(&tree->modified_extents);
	rwlock_init(&tree->lock);
}
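
/*
 * Usage sketch (illustrative, not part of this file): a user of the
 * interface embeds a tree and initializes it once, e.g. at inode
 * creation time.  The struct and field names here are hypothetical:
 *
 *	struct my_inode {
 *		struct extent_map_tree extent_tree;
 *	};
 *
 *	extent_map_tree_init(&inode->extent_tree);
 */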
/**
 * alloc_extent_map - allocate new extent map structure
 *
 * Allocate a new extent_map structure.  The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map().
 */
struct extent_map *alloc_extent_map(void)
{
	struct extent_map *em;

	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
	if (!em)
		return NULL;
	em->in_tree = 0;
	em->flags = 0;
	em->compress_type = BTRFS_COMPRESS_NONE;
	em->generation = 0;
	atomic_set(&em->refs, 1);
	INIT_LIST_HEAD(&em->list);
	return em;
}
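
/*
 * Example (sketch): allocate a map, fill it in, and drop the reference
 * when done.  The offsets below are arbitrary placeholders:
 *
 *	struct extent_map *em = alloc_extent_map();
 *	if (!em)
 *		return -ENOMEM;
 *	em->start = 0;
 *	em->len = 4096;
 *	em->block_start = EXTENT_MAP_HOLE;
 *	free_extent_map(em);	drops the last reference and frees em
 */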
/**
 * free_extent_map - drop reference count of an extent_map
 * @em:		extent map being released
 *
 * Drops the reference count on @em by one and frees the structure
 * if the reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	WARN_ON(atomic_read(&em->refs) == 0);
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		WARN_ON(!list_empty(&em->list));
		kmem_cache_free(extent_map_cache, em);
	}
}
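
/*
 * tree_insert keys the rbtree by file offset.  Any offset inside an
 * existing [start, extent_map_end) range compares equal, so inserting at
 * an offset that falls inside an existing extent returns the conflicting
 * node instead of linking the new one.
 */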
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset >= extent_map_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct extent_map, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
/*
 * Search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}
/* Check to see if two extent_map structs are adjacent and safe to merge. */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

	/*
	 * don't merge compressed extents, we need to know their
	 * actual size
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;

	if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) ||
	    test_bit(EXTENT_FLAG_LOGGING, &next->flags))
		return 0;

	/*
	 * We don't want to merge stuff that hasn't been written to the log yet
	 * since it may not reflect exactly what is on disk, and that would be
	 * bad.
	 */
	if (!list_empty(&prev->list) || !list_empty(&next->list))
		return 0;

	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->bdev == next->bdev &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start == EXTENT_MAP_DELALLOC &&
	      prev->block_start == EXTENT_MAP_DELALLOC) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}
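
/*
 * Worked example (values are illustrative): with a 4096 byte blocksize,
 * prev = [0, 4096) mapped at block_start 1048576 and next = [4096, 8192)
 * mapped at block_start 1052672 are both logically and physically
 * contiguous (extent_map_end(prev) == next->start and
 * extent_map_block_end(prev) == next->block_start), so mergable_maps()
 * returns 1 provided neither map is pinned, compressed, being logged, or
 * still on a modified list.
 */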
/*
 * Try to merge @em with the extents immediately before and after it in
 * @tree.  A merged neighbor is removed from the tree and its reference
 * is dropped.
 */
static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
	struct extent_map *merge = NULL;
	struct rb_node *rb;

	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->orig_start = merge->orig_start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
			em->mod_start = merge->mod_start;
			em->generation = max(em->generation, merge->generation);

			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}

	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->block_len;
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
		em->generation = max(em->generation, merge->generation);
		free_extent_map(merge);
	}
}
/**
 * unpin_extent_cache - unpin an extent from the cache
 * @tree:	tree to unpin the extent in
 * @start:	logical offset in the file
 * @len:	length of the extent
 * @gen:	generation that this extent has been modified in
 *
 * Called after an extent has been written to disk properly.  Set the
 * generation to the generation that actually added the file item to the
 * inode so we know we need to sync this extent when we call fsync().
 */
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
		       u64 gen)
{
	int ret = 0;
	struct extent_map *em;
	bool prealloc = false;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	WARN_ON(!em || em->start != start);

	if (!em)
		goto out;

	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
		list_move(&em->list, &tree->modified_extents);
	em->generation = gen;
	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) {
		prealloc = true;
		clear_bit(EXTENT_FLAG_FILLING, &em->flags);
	}

	try_merge_map(tree, em);

	if (prealloc) {
		em->mod_start = em->start;
		em->mod_len = em->len;
	}

	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;
}
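
/*
 * Lifecycle sketch (illustrative): an extent is typically added with
 * EXTENT_FLAG_PINNED set while its data is in flight, and once writeback
 * completes the writer unpins it with the owning transaction's
 * generation (trans_gen below is a placeholder):
 *
 *	unpin_extent_cache(tree, em->start, em->len, trans_gen);
 */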
void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
{
	clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
	if (em->in_tree)
		try_merge_map(tree, em);
}
/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree:	tree to insert new map in
 * @em:		map to insert
 * @modified:	indicate whether the new extent belongs on the tree's
 *		modified_extents list rather than being merge candidates
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken; on a
 * successful merge the neighboring map's reference is dropped.  Returns
 * -EEXIST if the range is already mapped.
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em, int modified)
{
	int ret = 0;
	struct rb_node *rb;
	struct extent_map *exist;

	exist = lookup_extent_mapping(tree, em->start, em->len);
	if (exist) {
		free_extent_map(exist);
		ret = -EEXIST;
		goto out;
	}
	rb = tree_insert(&tree->map, em->start, &em->rb_node);
	if (rb) {
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);

	em->mod_start = em->start;
	em->mod_len = em->len;

	if (modified)
		list_move(&em->list, &tree->modified_extents);
	else
		try_merge_map(tree, em);
out:
	return ret;
}
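
/*
 * Usage sketch (illustrative): callers serialize against other readers
 * and writers with the tree's rwlock, and still own their original
 * reference afterwards:
 *
 *	write_lock(&tree->lock);
 *	ret = add_extent_mapping(tree, em, 0);
 *	write_unlock(&tree->lock);
 *	free_extent_map(em);	drops the caller's reference; the tree
 *				keeps the one taken on insert
 */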
/* Simple helper to do math around the end of an extent, handling wrap. */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}
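
/*
 * For example, range_end(0, 4096) returns 4096, while an overflowing
 * pair such as range_end((u64)-4, 8) clamps to (u64)-1 rather than
 * wrapping around to a small offset.
 */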
static struct extent_map *
__lookup_extent_mapping(struct extent_map_tree *tree,
			u64 start, u64 len, int strict)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node) {
		if (prev)
			rb_node = prev;
		else if (next)
			rb_node = next;
		else
			return NULL;
	}

	em = rb_entry(rb_node, struct extent_map, rb_node);

	if (strict && !(end > em->start && start < extent_map_end(em)))
		return NULL;

	atomic_inc(&em->refs);
	return em;
}
/**
 * lookup_extent_mapping - lookup extent_map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.  There may be additional objects in the tree
 * that intersect, so check the object returned carefully to make sure that
 * no additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 1);
}
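
/*
 * Usage sketch (illustrative): a successful lookup takes a reference on
 * the returned map, so each hit must be paired with free_extent_map():
 *
 *	read_lock(&tree->lock);
 *	em = lookup_extent_mapping(tree, start, len);
 *	read_unlock(&tree->lock);
 *	if (em) {
 *		... use em->block_start, extent_map_end(em) ...
 *		free_extent_map(em);
 *	}
 */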
/**
 * search_extent_mapping - find a nearby extent map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.
 *
 * If one can't be found, any nearby extent may be returned.
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 0);
}
/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree:	extent tree to remove from
 * @em:		extent map being removed
 *
 * Removes @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use.
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret = 0;

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	rb_erase(&em->rb_node, &tree->map);
	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
		list_del_init(&em->list);
	em->in_tree = 0;
	return ret;
}