extent_map.c

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include "extent_map.h"

static struct kmem_cache *extent_map_cache;

int __init extent_map_init(void)
{
        extent_map_cache = kmem_cache_create("extent_map",
                        sizeof(struct extent_map), 0,
                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!extent_map_cache)
                return -ENOMEM;
        return 0;
}

void extent_map_exit(void)
{
        if (extent_map_cache)
                kmem_cache_destroy(extent_map_cache);
}

/**
 * extent_map_tree_init - initialize extent map tree
 * @tree: tree to initialize
 * @mask: flags for memory allocations during tree operations
 *
 * Initialize the extent tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
{
        tree->map.rb_node = NULL;
        rwlock_init(&tree->lock);
}
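
/*
 * Usage sketch (an illustrative addition, not original code): callers
 * typically embed an extent_map_tree in their own per-inode structure
 * and initialize it once.  'example_inode' is a hypothetical container
 * used only for illustration.
 */
struct example_inode {
        struct extent_map_tree extent_tree;
        /* ... other per-inode state ... */
};

static void __maybe_unused example_inode_init(struct example_inode *inode)
{
        extent_map_tree_init(&inode->extent_tree, GFP_NOFS);
}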

/**
 * alloc_extent_map - allocate new extent map structure
 * @mask: memory allocation flags
 *
 * Allocate a new extent_map structure.  The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map().
 */
struct extent_map *alloc_extent_map(gfp_t mask)
{
        struct extent_map *em;

        em = kmem_cache_alloc(extent_map_cache, mask);
        if (!em || IS_ERR(em))
                return em;
        em->in_tree = 0;
        em->flags = 0;
        atomic_set(&em->refs, 1);
        return em;
}

/**
 * free_extent_map - drop reference count of an extent_map
 * @em: extent map being released
 *
 * Drops the reference count of @em by one and frees the structure
 * if the reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
        if (!em)
                return;
        WARN_ON(atomic_read(&em->refs) == 0);
        if (atomic_dec_and_test(&em->refs)) {
                WARN_ON(em->in_tree);
                kmem_cache_free(extent_map_cache, em);
        }
}
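
/*
 * Reference-counting sketch (an illustrative addition, not original code;
 * the field values chosen here are arbitrary): alloc_extent_map() returns
 * a map with a reference count of one, so every successful allocation
 * must eventually be paired with one free_extent_map() call.
 */
static __maybe_unused struct extent_map *example_alloc_hole(u64 start, u64 len)
{
        struct extent_map *em = alloc_extent_map(GFP_NOFS);

        if (!em)
                return NULL;
        em->start = start;
        em->len = len;
        em->block_start = EXTENT_MAP_HOLE;      /* maps a hole, no disk blocks */
        em->block_len = len;
        em->bdev = NULL;
        return em;      /* refs == 1; caller releases with free_extent_map() */
}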

/*
 * Insert @node into the rb-tree at @root, keyed by the start offset of
 * the containing extent_map.  Returns the node already in the tree if
 * @offset falls inside an existing extent (the insert is rejected),
 * or NULL on success.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct extent_map *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct extent_map, rb_node);
                WARN_ON(!entry->in_tree);
                if (offset < entry->start)
                        p = &(*p)->rb_left;
                else if (offset >= extent_map_end(entry))
                        p = &(*p)->rb_right;
                else
                        return parent;  /* @offset lands inside this extent */
        }
        entry = rb_entry(node, struct extent_map, rb_node);
        entry->in_tree = 1;
        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}

/*
 * Search the tree for an extent_map containing @offset.  If none is
 * found, report the neighboring extents instead: @prev_ret is set to
 * the first extent that starts after @offset, and @next_ret to the
 * last extent that ends at or before it.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
                                     struct rb_node **prev_ret,
                                     struct rb_node **next_ret)
{
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *orig_prev = NULL;
        struct extent_map *entry;
        struct extent_map *prev_entry = NULL;

        while (n) {
                entry = rb_entry(n, struct extent_map, rb_node);
                prev = n;
                prev_entry = entry;
                WARN_ON(!entry->in_tree);
                if (offset < entry->start)
                        n = n->rb_left;
                else if (offset >= extent_map_end(entry))
                        n = n->rb_right;
                else
                        return n;       /* exact hit: @offset is inside this extent */
        }

        if (prev_ret) {
                orig_prev = prev;
                /* walk forward until we pass @offset */
                while (prev && offset >= extent_map_end(prev_entry)) {
                        prev = rb_next(prev);
                        prev_entry = rb_entry(prev, struct extent_map, rb_node);
                }
                *prev_ret = prev;
                prev = orig_prev;
        }

        if (next_ret) {
                prev_entry = rb_entry(prev, struct extent_map, rb_node);
                /* walk backward until we are at or before @offset */
                while (prev && offset < prev_entry->start) {
                        prev = rb_prev(prev);
                        prev_entry = rb_entry(prev, struct extent_map, rb_node);
                }
                *next_ret = prev;
        }
        return NULL;
}

/*
 * Look for an extent containing 'offset' in the tree; if none is found,
 * return the closest extent that starts after 'offset' instead.
 */
static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
        struct rb_node *prev;
        struct rb_node *ret;

        ret = __tree_search(root, offset, &prev, NULL);
        if (!ret)
                return prev;
        return ret;
}

/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
        if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
                return 0;

        /*
         * don't merge compressed extents, we need to know their
         * actual size
         */
        if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
                return 0;

        if (extent_map_end(prev) == next->start &&
            prev->flags == next->flags &&
            prev->bdev == next->bdev &&
            ((next->block_start == EXTENT_MAP_HOLE &&
              prev->block_start == EXTENT_MAP_HOLE) ||
             (next->block_start == EXTENT_MAP_INLINE &&
              prev->block_start == EXTENT_MAP_INLINE) ||
             (next->block_start == EXTENT_MAP_DELALLOC &&
              prev->block_start == EXTENT_MAP_DELALLOC) ||
             (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
              next->block_start == extent_map_block_end(prev)))) {
                return 1;
        }
        return 0;
}
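
/*
 * Worked example (an illustrative addition with made-up numbers): a map
 * covering file range [0, 4096) with block_start 1048576 and block_len
 * 4096, and a map covering [4096, 8192) with block_start 1052672, are
 * mergable: the file ranges touch, flags and bdev match, and the second
 * extent starts exactly at extent_map_block_end() of the first
 * (1048576 + 4096 == 1052672).  Two adjacent holes
 * (block_start == EXTENT_MAP_HOLE) merge the same way.
 */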

/*
 * unpin_extent_cache - unpin an extent from the cache
 * @tree: tree to unpin the extent in
 * @start: logical offset of the extent
 * @len: length of the extent
 *
 * Called after an extent has been written to disk properly.  Clears the
 * pinned flag on the matching extent_map and tries to merge it with its
 * neighbors.
 */
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
{
        int ret = 0;
        struct extent_map *merge = NULL;
        struct rb_node *rb;
        struct extent_map *em;

        write_lock(&tree->lock);
        em = lookup_extent_mapping(tree, start, len);

        WARN_ON(!em || em->start != start);

        if (!em)
                goto out;

        clear_bit(EXTENT_FLAG_PINNED, &em->flags);

        /* try to merge with the extent before us */
        if (em->start != 0) {
                rb = rb_prev(&em->rb_node);
                if (rb)
                        merge = rb_entry(rb, struct extent_map, rb_node);
                if (rb && mergable_maps(merge, em)) {
                        em->start = merge->start;
                        em->len += merge->len;
                        em->block_len += merge->block_len;
                        em->block_start = merge->block_start;
                        merge->in_tree = 0;
                        rb_erase(&merge->rb_node, &tree->map);
                        free_extent_map(merge);
                }
        }

        /* try to merge with the extent after us */
        rb = rb_next(&em->rb_node);
        if (rb)
                merge = rb_entry(rb, struct extent_map, rb_node);
        if (rb && mergable_maps(em, merge)) {
                em->len += merge->len;
                em->block_len += merge->block_len;
                rb_erase(&merge->rb_node, &tree->map);
                merge->in_tree = 0;
                free_extent_map(merge);
        }

        free_extent_map(em);    /* drop the reference taken by the lookup */
out:
        write_unlock(&tree->lock);
        return ret;
}

/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree: tree to insert new map in
 * @em: map to insert
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 */
int add_extent_mapping(struct extent_map_tree *tree,
                       struct extent_map *em)
{
        int ret = 0;
        struct extent_map *merge = NULL;
        struct rb_node *rb;
        struct extent_map *exist;

        exist = lookup_extent_mapping(tree, em->start, em->len);
        if (exist) {
                free_extent_map(exist);
                ret = -EEXIST;
                goto out;
        }
        rb = tree_insert(&tree->map, em->start, &em->rb_node);
        if (rb) {
                ret = -EEXIST;
                goto out;
        }
        atomic_inc(&em->refs);  /* the tree now holds its own reference */

        /* try to merge with the extent before us */
        if (em->start != 0) {
                rb = rb_prev(&em->rb_node);
                if (rb)
                        merge = rb_entry(rb, struct extent_map, rb_node);
                if (rb && mergable_maps(merge, em)) {
                        em->start = merge->start;
                        em->len += merge->len;
                        em->block_len += merge->block_len;
                        em->block_start = merge->block_start;
                        merge->in_tree = 0;
                        rb_erase(&merge->rb_node, &tree->map);
                        free_extent_map(merge);
                }
        }

        /* try to merge with the extent after us */
        rb = rb_next(&em->rb_node);
        if (rb)
                merge = rb_entry(rb, struct extent_map, rb_node);
        if (rb && mergable_maps(em, merge)) {
                em->len += merge->len;
                em->block_len += merge->block_len;
                rb_erase(&merge->rb_node, &tree->map);
                merge->in_tree = 0;
                free_extent_map(merge);
        }
out:
        return ret;
}
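
/*
 * Usage sketch (an illustrative addition, not original code): insertion
 * must happen under the tree's write lock, and the caller still owns its
 * own reference afterwards, so it drops that reference when done.
 */
static int __maybe_unused example_insert(struct extent_map_tree *tree,
                                         struct extent_map *em)
{
        int ret;

        write_lock(&tree->lock);
        ret = add_extent_mapping(tree, em);     /* -EEXIST if range is taken */
        write_unlock(&tree->lock);
        free_extent_map(em);    /* drop the caller's reference */
        return ret;
}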

/* simple helper to do math around the end of an extent, handling wrap */
static u64 range_end(u64 start, u64 len)
{
        if (start + len < start)
                return (u64)-1;
        return start + len;
}

/**
 * lookup_extent_mapping - lookup extent_map
 * @tree: tree to lookup in
 * @start: byte offset to start the search
 * @len: length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.  There may be additional objects in the tree
 * that intersect, so check the object returned carefully to make sure that
 * no additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
                                         u64 start, u64 len)
{
        struct extent_map *em;
        struct rb_node *rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *next = NULL;
        u64 end = range_end(start, len);

        rb_node = __tree_search(&tree->map, start, &prev, &next);
        if (!rb_node && prev) {
                em = rb_entry(prev, struct extent_map, rb_node);
                if (end > em->start && start < extent_map_end(em))
                        goto found;
        }
        if (!rb_node && next) {
                em = rb_entry(next, struct extent_map, rb_node);
                if (end > em->start && start < extent_map_end(em))
                        goto found;
        }
        if (!rb_node) {
                em = NULL;
                goto out;
        }
        if (IS_ERR(rb_node)) {
                em = ERR_PTR(PTR_ERR(rb_node));
                goto out;
        }
        em = rb_entry(rb_node, struct extent_map, rb_node);
        if (end > em->start && start < extent_map_end(em))
                goto found;

        em = NULL;
        goto out;

found:
        atomic_inc(&em->refs);
out:
        return em;
}
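
/*
 * Usage sketch (an illustrative addition, not original code): lookups run
 * under the tree's read lock and return a referenced map, so the result
 * must be released with free_extent_map().
 */
static u64 __maybe_unused example_block_for_offset(struct extent_map_tree *tree,
                                                   u64 start)
{
        struct extent_map *em;
        u64 block_start = EXTENT_MAP_HOLE;

        read_lock(&tree->lock);
        em = lookup_extent_mapping(tree, start, 1);
        read_unlock(&tree->lock);
        if (em) {
                block_start = em->block_start;
                free_extent_map(em);    /* drop the lookup reference */
        }
        return block_start;
}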

/**
 * search_extent_mapping - find a nearby extent map
 * @tree: tree to lookup in
 * @start: byte offset to start the search
 * @len: length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.
 *
 * If one can't be found, any nearby extent may be returned
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
                                         u64 start, u64 len)
{
        struct extent_map *em;
        struct rb_node *rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *next = NULL;

        rb_node = __tree_search(&tree->map, start, &prev, &next);
        if (!rb_node && prev) {
                em = rb_entry(prev, struct extent_map, rb_node);
                goto found;
        }
        if (!rb_node && next) {
                em = rb_entry(next, struct extent_map, rb_node);
                goto found;
        }
        if (!rb_node) {
                em = NULL;
                goto out;
        }
        if (IS_ERR(rb_node)) {
                em = ERR_PTR(PTR_ERR(rb_node));
                goto out;
        }
        em = rb_entry(rb_node, struct extent_map, rb_node);
found:
        atomic_inc(&em->refs);
out:
        return em;
}

/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree: extent tree to remove from
 * @em: extent map being removed
 *
 * Removes @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
        int ret = 0;

        WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
        rb_erase(&em->rb_node, &tree->map);
        em->in_tree = 0;
        return ret;
}
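
/*
 * Usage sketch (an illustrative addition, not original code): removal
 * happens under the write lock, and since remove_extent_mapping() drops
 * no references, the caller must release the tree's reference itself.
 */
static void __maybe_unused example_drop_mapping(struct extent_map_tree *tree,
                                                struct extent_map *em)
{
        write_lock(&tree->lock);
        remove_extent_mapping(tree, em);
        write_unlock(&tree->lock);
        free_extent_map(em);    /* drop the reference the tree was holding */
}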