delayed-ref.c

/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;

/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */
/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1,
			  int type)
{
	if (type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}
/*
 * entries in the rb tree are ordered by the byte number of the extent,
 * type of the delayed backrefs and content of delayed backrefs.
 */
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
		      struct btrfs_delayed_ref_node *ref1,
		      bool compare_seq)
{
	if (ref1->bytenr < ref2->bytenr)
		return -1;
	if (ref1->bytenr > ref2->bytenr)
		return 1;
	if (ref1->is_head && ref2->is_head)
		return 0;
	if (ref2->is_head)
		return -1;
	if (ref1->is_head)
		return 1;
	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	/* merging of sequenced refs is not allowed */
	if (compare_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
				      btrfs_delayed_node_to_tree_ref(ref1),
				      ref1->type);
	} else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
		   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
		return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
				      btrfs_delayed_node_to_data_ref(ref1));
	}
	BUG();
	return 0;
}
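
/*
 * A note on the resulting order (illustrative, derived from the
 * comparators above, not a separate on-disk format): for a given
 * bytenr the head node sorts after every plain ref node, so all of
 * an extent's delayed refs sit immediately to the left of its head:
 *
 *	... [A ref][A ref][A HEAD] [B ref][B HEAD] ...
 *
 * This is why the merge and run paths below walk rb_prev() from a
 * head to visit that extent's refs.
 */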
/*
 * insert a new ref into the rbtree.  This returns any existing refs
 * for the same (bytenr,parent) tuple, or NULL if the new node was properly
 * inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
						  struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	struct btrfs_delayed_ref_node *ins;
	int cmp;

	ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 rb_node);

		cmp = comp_entry(entry, ins, 1);
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}
/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
				    u64 bytenr,
				    struct btrfs_delayed_ref_node **last,
				    int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_node *entry;
	int cmp = 0;

again:
	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		WARN_ON(!entry->in_tree);
		if (last)
			*last = entry;

		if (bytenr < entry->bytenr)
			cmp = -1;
		else if (bytenr > entry->bytenr)
			cmp = 1;
		else if (!btrfs_delayed_ref_is_head(entry))
			cmp = 1;
		else
			cmp = 0;

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (cmp > 0) {
			n = rb_next(&entry->rb_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_node,
					 rb_node);
			bytenr = entry->bytenr;
			return_bigger = 0;
			goto again;
		}
		return entry;
	}
	return NULL;
}
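
/*
 * Illustrative example of the return_bigger behavior (values
 * hypothetical): with heads at bytenrs 4096 and 8192, a lookup of
 * bytenr 6000 with return_bigger set ends its descent next to one of
 * the two heads and, after at most one rb_next() restart, returns the
 * 8192 head.  A lookup past the last head wraps around via rb_first()
 * back to the start of the tree.
 */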
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}
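
/*
 * Minimal caller sketch (illustrative; real callers live in the
 * delayed-ref run paths): -EAGAIN means the head was processed and
 * removed by someone else while we slept on its mutex, so the caller
 * should pick a new head and retry rather than treat it as an error.
 *
 *	ret = btrfs_delayed_ref_lock(trans, head);
 *	if (ret == -EAGAIN)
 *		goto again;	// head went away; choose another
 */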
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_node *ref)
{
	rb_erase(&ref->rb_node, &delayed_refs->root);
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	delayed_refs->num_entries--;
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}
static int merge_ref(struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_root *delayed_refs,
		     struct btrfs_delayed_ref_node *ref, u64 seq)
{
	struct rb_node *node;
	int merged = 0;
	int mod = 0;
	int done = 0;

	node = rb_prev(&ref->rb_node);
	while (node) {
		struct btrfs_delayed_ref_node *next;

		next = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		node = rb_prev(node);
		if (next->bytenr != ref->bytenr)
			break;
		if (seq && next->seq >= seq)
			break;
		if (comp_entry(ref, next, 0))
			continue;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				struct btrfs_delayed_ref_node *tmp;

				tmp = ref;
				ref = next;
				next = tmp;
				done = 1;
			}
			mod = -next->ref_mod;
		}

		merged++;
		drop_delayed_ref(trans, delayed_refs, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, ref);
			break;
		} else {
			/*
			 * You can't have multiples of the same ref on a tree
			 * block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}

		if (done)
			break;
		node = rb_prev(&ref->rb_node);
	}
	return merged;
}
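
/*
 * Worked example of the merge arithmetic above (values hypothetical):
 * an ADD ref with ref_mod 1 meeting a matching DROP ref with ref_mod 1
 * yields 1 + (-1) == 0, so both nodes are dropped and the extent
 * allocation tree is never touched.  An ADD with ref_mod 2 meeting a
 * DROP with ref_mod 1 leaves a single ADD with ref_mod 1.
 */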
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	u64 seq = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	node = rb_prev(&head->node.rb_node);
	while (node) {
		struct btrfs_delayed_ref_node *ref;

		ref = rb_entry(node, struct btrfs_delayed_ref_node,
			       rb_node);
		if (ref->bytenr != head->node.bytenr)
			break;

		/* We can't merge refs that are outside of our seq count */
		if (seq && ref->seq >= seq)
			break;
		if (merge_ref(trans, delayed_refs, ref, seq))
			node = rb_prev(&head->node.rb_node);
		else
			node = rb_prev(node);
	}
}
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
				 (u32)(seq >> 32), (u32)seq,
				 (u32)(elem->seq >> 32), (u32)elem->seq,
				 delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}
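
/*
 * Usage note (illustrative): a return of 1 tells the caller that some
 * tree_mod_seq holder may still need to see this ref's pre-merge
 * history, so processing of the ref should be deferred until that
 * holder is done.
 */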
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
			   struct list_head *cluster, u64 start)
{
	int count = 0;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *head;

	delayed_refs = &trans->transaction->delayed_refs;
	if (start == 0) {
		node = rb_first(&delayed_refs->root);
	} else {
		ref = NULL;
		find_ref_head(&delayed_refs->root, start + 1, &ref, 1);
		if (ref) {
			node = &ref->rb_node;
		} else
			node = rb_first(&delayed_refs->root);
	}
again:
	while (node && count < 32) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (btrfs_delayed_ref_is_head(ref)) {
			head = btrfs_delayed_node_to_head(ref);
			if (list_empty(&head->cluster)) {
				list_add_tail(&head->cluster, cluster);
				delayed_refs->run_delayed_start =
					head->node.bytenr;
				count++;
				WARN_ON(delayed_refs->num_heads_ready == 0);
				delayed_refs->num_heads_ready--;
			} else if (count) {
				/* the goal of the clustering is to find extents
				 * that are likely to end up in the same extent
				 * leaf on disk.  So, we don't want them spread
				 * all over the tree.  Stop now if we've hit
				 * a head that was already in use
				 */
				break;
			}
		}
		node = rb_next(node);
	}
	if (count) {
		return 0;
	} else if (start) {
		/*
		 * we've gone to the end of the rbtree without finding any
		 * clusters.  start from the beginning and try again
		 */
		start = 0;
		node = rb_first(&delayed_refs->root);
		goto again;
	}
	return 1;
}
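
/*
 * Caller sketch (illustrative; the real loop lives in
 * btrfs_run_delayed_refs): gather up to 32 adjacent heads, process
 * them, then continue from run_delayed_start until the function
 * returns 1, meaning nothing is left to cluster.
 *
 *	LIST_HEAD(cluster);
 *	while (!btrfs_find_ref_cluster(trans, &cluster, start)) {
 *		// run the refs in the cluster, then:
 *		btrfs_release_ref_cluster(&cluster);
 *		start = delayed_refs->run_delayed_start;
 *	}
 */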
void btrfs_release_ref_cluster(struct list_head *cluster)
{
	struct list_head *pos, *q;

	list_for_each_safe(pos, q, cluster)
		list_del_init(pos);
}
/*
 * helper function to update an extent delayed ref in the
 * rbtree.  existing and update must both have the same
 * bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
		    struct btrfs_delayed_ref_root *delayed_refs,
		    struct btrfs_delayed_ref_node *existing,
		    struct btrfs_delayed_ref_node *update)
{
	if (update->action != existing->action) {
		/*
		 * this is effectively undoing either an add or a
		 * drop.  We decrement the ref_mod, and if it goes
		 * down to zero we just delete the entry without
		 * ever changing the extent allocation tree.
		 */
		existing->ref_mod--;
		if (existing->ref_mod == 0)
			drop_delayed_ref(trans, delayed_refs, existing);
		else
			WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
				existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
	} else {
		WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
			existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		/*
		 * the action on the existing ref matches
		 * the action on the ref we're trying to add.
		 * Bump the ref_mod by one so the backref that
		 * is eventually added/removed has the correct
		 * reference count
		 */
		existing->ref_mod += update->ref_mod;
	}
}
/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;
	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = 1;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = 1;
			}
			btrfs_free_delayed_extent_op(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation
	 */
	existing->ref_mod += update->ref_mod;
}
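
/*
 * Example scenario for the must_insert_reserved handling above
 * (illustrative): an extent is allocated (its head queued with
 * must_insert_reserved set), freed, and the same byte range is
 * reallocated before the delayed refs run.  The second allocation
 * finds the stale head still in the tree without the flag, so the
 * flag and num_bytes must be refreshed here to keep the reserved
 * accounting correct.
 */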
/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,
					  struct btrfs_trans_handle *trans,
					  struct btrfs_delayed_ref_node *ref,
					  u64 bytenr, u64 num_bytes,
					  int action, int is_data)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;

	INIT_LIST_HEAD(&head_ref->cluster);
	mutex_init(&head_ref->mutex);

	trace_add_delayed_ref_head(ref, head_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_head_ref(existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
	} else {
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
}
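
/*
 * Worked example of the head ref_mod accounting (values hypothetical):
 * two ADDs and one DROP queued against the same extent leave the head
 * with ref_mod 1 + 1 - 1 == 1, so running the head results in a single
 * net insertion into the extent allocation tree.  A head update
 * (count_mod == 0) changes metadata without touching the sum.
 */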
/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
					  struct btrfs_trans_handle *trans,
					  struct btrfs_delayed_ref_node *ref,
					  u64 bytenr, u64 num_bytes, u64 parent,
					  u64 ref_root, int level, int action,
					  int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (need_ref_seq(for_cow, ref_root))
		seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_add_delayed_tree_ref(ref, full_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
}
/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
					  struct btrfs_trans_handle *trans,
					  struct btrfs_delayed_ref_node *ref,
					  u64 bytenr, u64 num_bytes, u64 parent,
					  u64 ref_root, u64 owner, u64 offset,
					  int action, int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (need_ref_seq(for_cow, ref_root))
		seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_add_delayed_data_ref(ref, full_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
}
/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
			     num_bytes, action, 0);

	add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action,
			     for_cow);
	spin_unlock(&delayed_refs->lock);
	if (need_ref_seq(for_cow, ref_root))
		btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

	return 0;
}
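
/*
 * Caller sketch (illustrative; values hypothetical): queue an added
 * reference to a level-0 tree block owned by root 5, with no shared
 * parent and no extent_op:
 *
 *	ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
 *					 num_bytes, 0, 5, 0,
 *					 BTRFS_ADD_DELAYED_REF,
 *					 NULL, 0);
 *	if (ret)
 *		return ret;	// only fails with -ENOMEM
 */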
/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
			     num_bytes, action, 1);

	add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
			     action, for_cow);
	spin_unlock(&delayed_refs->lock);
	if (need_ref_seq(for_cow, ref_root))
		btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

	return 0;
}
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
			     num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data);
	spin_unlock(&delayed_refs->lock);
	return 0;
}
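
/*
 * Usage note (illustrative): an extent_op queued this way carries a
 * pending key or flags update for an extent item without changing its
 * reference count.  The head is inserted with
 * BTRFS_UPDATE_DELAYED_HEAD, so count_mod is 0 and only the metadata
 * change is applied when the head runs.
 */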
/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	ref = find_ref_head(&delayed_refs->root, bytenr, NULL, 0);
	if (ref)
		return btrfs_delayed_node_to_head(ref);
	return NULL;
}
void btrfs_delayed_ref_exit(void)
{
	if (btrfs_delayed_ref_head_cachep)
		kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	if (btrfs_delayed_tree_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	if (btrfs_delayed_data_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	if (btrfs_delayed_extent_op_cachep)
		kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}
int btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}
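
/*
 * Init/teardown sketch (illustrative): these run once at module load
 * and unload, e.g. from btrfs's module init path:
 *
 *	if (btrfs_delayed_ref_init())
 *		return -ENOMEM;
 *	...
 *	btrfs_delayed_ref_exit();
 *
 * kmem_cache_create() returning NULL is the only failure mode here, so
 * a partial setup is unwound by the exit routine's NULL checks.
 */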