delayed-ref.c
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"

/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

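/*
 * Lifecycle sketch (an illustrative summary, not a strict contract):
 * callers queue references with btrfs_add_delayed_tree_ref or
 * btrfs_add_delayed_data_ref below, which insert a head node plus a
 * ref node into the per-transaction rbtree.  The transaction code
 * later pulls batches of heads back out with btrfs_find_ref_cluster
 * and applies the accumulated modifications to the extent allocation
 * tree in one pass.
 */
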
/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
                          struct btrfs_delayed_tree_ref *ref1)
{
        if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
                          struct btrfs_delayed_data_ref *ref1)
{
        if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
                if (ref1->objectid < ref2->objectid)
                        return -1;
                if (ref1->objectid > ref2->objectid)
                        return 1;
                if (ref1->offset < ref2->offset)
                        return -1;
                if (ref1->offset > ref2->offset)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

/*
 * entries in the rb tree are ordered by the byte number of the extent,
 * type of the delayed backrefs and content of delayed backrefs.
 */
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
                      struct btrfs_delayed_ref_node *ref1)
{
        if (ref1->bytenr < ref2->bytenr)
                return -1;
        if (ref1->bytenr > ref2->bytenr)
                return 1;
        if (ref1->is_head && ref2->is_head)
                return 0;
        if (ref2->is_head)
                return -1;
        if (ref1->is_head)
                return 1;
        if (ref1->type < ref2->type)
                return -1;
        if (ref1->type > ref2->type)
                return 1;
        if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
            ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
                return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
                                      btrfs_delayed_node_to_tree_ref(ref1));
        } else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
                   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
                return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
                                      btrfs_delayed_node_to_data_ref(ref1));
        }
        BUG();
        return 0;
}

/*
 * insert a new ref into the rbtree.  This returns the existing ref that
 * compares equal to the new one (same bytenr, type and ref content), or
 * NULL if the new node was properly inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
                                                  struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_node *entry;
        struct btrfs_delayed_ref_node *ins;
        int cmp;

        ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
                                 rb_node);

                cmp = comp_entry(entry, ins);
                if (cmp < 0)
                        p = &(*p)->rb_left;
                else if (cmp > 0)
                        p = &(*p)->rb_right;
                else
                        return entry;
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color(node, root);
        return NULL;
}

/*
 * find a head entry based on bytenr.  This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot
 */
static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
                                u64 bytenr,
                                struct btrfs_delayed_ref_node **last)
{
        struct rb_node *n = root->rb_node;
        struct btrfs_delayed_ref_node *entry;
        int cmp;

        while (n) {
                entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
                WARN_ON(!entry->in_tree);
                if (last)
                        *last = entry;

                if (bytenr < entry->bytenr)
                        cmp = -1;
                else if (bytenr > entry->bytenr)
                        cmp = 1;
                else if (!btrfs_delayed_ref_is_head(entry))
                        cmp = 1;
                else
                        cmp = 0;

                if (cmp < 0)
                        n = n->rb_left;
                else if (cmp > 0)
                        n = n->rb_right;
                else
                        return entry;
        }
        return NULL;
}

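/*
 * try to take the head ref's mutex without stalling everyone else who
 * needs the delayed_refs spinlock.  Returns 0 with the mutex held, or
 * -EAGAIN if the head was removed from the tree while we slept on the
 * mutex and the caller must look it up again.
 */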
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_head *head)
{
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        assert_spin_locked(&delayed_refs->lock);
        if (mutex_trylock(&head->mutex))
                return 0;

        atomic_inc(&head->node.refs);
        spin_unlock(&delayed_refs->lock);

        mutex_lock(&head->mutex);
        spin_lock(&delayed_refs->lock);
        if (!head->node.in_tree) {
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref(&head->node);
                return -EAGAIN;
        }
        btrfs_put_delayed_ref(&head->node);
        return 0;
}

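/*
 * collect a cluster of up to 32 ref heads that are not yet being
 * processed, starting at 'start' in the rbtree and wrapping back to
 * the beginning if needed.  Returns 0 if any heads were added to
 * 'cluster', and 1 if there was nothing left to do.
 */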
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
                           struct list_head *cluster, u64 start)
{
        int count = 0;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct rb_node *node;
        struct btrfs_delayed_ref_node *ref;
        struct btrfs_delayed_ref_head *head;

        delayed_refs = &trans->transaction->delayed_refs;
        if (start == 0) {
                node = rb_first(&delayed_refs->root);
        } else {
                ref = NULL;
                find_ref_head(&delayed_refs->root, start, &ref);
                if (ref) {
                        struct btrfs_delayed_ref_node *tmp;

                        node = rb_prev(&ref->rb_node);
                        while (node) {
                                tmp = rb_entry(node,
                                               struct btrfs_delayed_ref_node,
                                               rb_node);
                                if (tmp->bytenr < start)
                                        break;
                                ref = tmp;
                                node = rb_prev(&ref->rb_node);
                        }
                        node = &ref->rb_node;
                } else
                        node = rb_first(&delayed_refs->root);
        }
again:
        while (node && count < 32) {
                ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
                if (btrfs_delayed_ref_is_head(ref)) {
                        head = btrfs_delayed_node_to_head(ref);
                        if (list_empty(&head->cluster)) {
                                list_add_tail(&head->cluster, cluster);
                                delayed_refs->run_delayed_start =
                                        head->node.bytenr;
                                count++;
                                WARN_ON(delayed_refs->num_heads_ready == 0);
                                delayed_refs->num_heads_ready--;
                        } else if (count) {
                                /* the goal of the clustering is to find
                                 * extents that are likely to end up in the
                                 * same extent leaf on disk.  So, we don't
                                 * want them spread all over the tree.  Stop
                                 * now if we've hit a head that was already
                                 * in use
                                 */
                                break;
                        }
                }
                node = rb_next(node);
        }
        if (count) {
                return 0;
        } else if (start) {
                /*
                 * we've gone to the end of the rbtree without finding any
                 * clusters.  start from the beginning and try again
                 */
                start = 0;
                node = rb_first(&delayed_refs->root);
                goto again;
        }
        return 1;
}

/*
 * This checks to see if there are any delayed refs in the
 * rbtree for a given bytenr.  It returns one if it finds any
 * and zero otherwise.
 *
 * If it only finds a head node, it returns 0.
 *
 * The idea is to use this when deciding if you can safely delete an
 * extent from the extent allocation tree.  There may be a pending
 * ref in the rbtree that adds or removes references, so as long as this
 * returns one you need to leave the BTRFS_EXTENT_ITEM in the extent
 * allocation tree.
 */
int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr)
{
        struct btrfs_delayed_ref_node *ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct rb_node *prev_node;
        int ret = 0;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
        if (ref) {
                prev_node = rb_prev(&ref->rb_node);
                if (!prev_node)
                        goto out;
                ref = rb_entry(prev_node, struct btrfs_delayed_ref_node,
                               rb_node);
                if (ref->bytenr == bytenr)
                        ret = 1;
        }
out:
        spin_unlock(&delayed_refs->lock);
        return ret;
}

/*
 * helper function to lookup reference count and flags of extent.
 *
 * the head node for delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  the head
 * node may also store the extent flags to set.  This way you can check
 * to see what the reference count and extent flags would be if all of
 * the delayed refs are not processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 num_bytes, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_node *ref;
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        key.offset = num_bytes;
        delayed_refs = &trans->transaction->delayed_refs;
again:
        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out;

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        spin_lock(&delayed_refs->lock);
        ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
        if (ref) {
                head = btrfs_delayed_node_to_head(ref);
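                /*
                 * someone else is processing this head; wait on their
                 * mutex, then retry the whole lookup since both the
                 * extent item and the delayed ref may have changed.
                 */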
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&ref->refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(root->fs_info->extent_root, path);

                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(ref);
                        goto again;
                }
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += ref->ref_mod;
                mutex_unlock(&head->mutex);
        }
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out:
        spin_unlock(&delayed_refs->lock);
        btrfs_free_path(path);
        return ret;
}

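/*
 * Example (illustrative only): a caller deciding whether an extent is
 * still shared might do something like:
 *
 *      u64 refs, flags;
 *
 *      ret = btrfs_lookup_extent_info(trans, root, bytenr, num_bytes,
 *                                     &refs, &flags);
 *      if (ret == 0 && refs > 1)
 *              ... the extent is still referenced elsewhere ...
 */
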
/*
 * helper function to update an extent delayed ref in the
 * rbtree.  existing and update must both have the same
 * bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
                    struct btrfs_delayed_ref_root *delayed_refs,
                    struct btrfs_delayed_ref_node *existing,
                    struct btrfs_delayed_ref_node *update)
{
        if (update->action != existing->action) {
                /*
                 * this is effectively undoing either an add or a
                 * drop.  We decrement the ref_mod, and if it goes
                 * down to zero we just delete the entry without
                 * ever changing the extent allocation tree.
                 */
                existing->ref_mod--;
                if (existing->ref_mod == 0) {
                        rb_erase(&existing->rb_node,
                                 &delayed_refs->root);
                        existing->in_tree = 0;
                        btrfs_put_delayed_ref(existing);
                        delayed_refs->num_entries--;
                        if (trans->delayed_ref_updates)
                                trans->delayed_ref_updates--;
                } else {
                        WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
                                existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
                }
        } else {
                WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
                        existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
                /*
                 * the action on the existing ref matches
                 * the action on the ref we're trying to add.
                 * Bump the ref_mod by one so the backref that
                 * is eventually added/removed has the correct
                 * reference count
                 */
                existing->ref_mod += update->ref_mod;
        }
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
                         struct btrfs_delayed_ref_node *update)
{
        struct btrfs_delayed_ref_head *existing_ref;
        struct btrfs_delayed_ref_head *ref;

        existing_ref = btrfs_delayed_node_to_head(existing);
        ref = btrfs_delayed_node_to_head(update);
        BUG_ON(existing_ref->is_data != ref->is_data);

        if (ref->must_insert_reserved) {
                /* if the extent was freed and then
                 * reallocated before the delayed ref
                 * entries were processed, we can end up
                 * with an existing head ref without
                 * the must_insert_reserved flag set.
                 * Set it again here
                 */
                existing_ref->must_insert_reserved = ref->must_insert_reserved;

                /*
                 * update the num_bytes so we make sure the accounting
                 * is done correctly
                 */
                existing->num_bytes = update->num_bytes;
        }

        if (ref->extent_op) {
                if (!existing_ref->extent_op) {
                        existing_ref->extent_op = ref->extent_op;
                } else {
                        if (ref->extent_op->update_key) {
                                memcpy(&existing_ref->extent_op->key,
                                       &ref->extent_op->key,
                                       sizeof(ref->extent_op->key));
                                existing_ref->extent_op->update_key = 1;
                        }
                        if (ref->extent_op->update_flags) {
                                existing_ref->extent_op->flags_to_set |=
                                        ref->extent_op->flags_to_set;
                                existing_ref->extent_op->update_flags = 1;
                        }
                        kfree(ref->extent_op);
                }
        }
        /*
         * update the reference mod on the head to reflect this new operation
         */
        existing->ref_mod += update->ref_mod;
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline int add_delayed_ref_head(struct btrfs_trans_handle *trans,
                                         struct btrfs_delayed_ref_node *ref,
                                         u64 bytenr, u64 num_bytes,
                                         int action, int is_data)
{
        struct btrfs_delayed_ref_node *existing;
        struct btrfs_delayed_ref_head *head_ref = NULL;
        struct btrfs_delayed_ref_root *delayed_refs;
        int count_mod = 1;
        int must_insert_reserved = 0;

        /*
         * the head node stores the sum of all the mods, so dropping a ref
         * should drop the sum in the head node by one.
         */
        if (action == BTRFS_UPDATE_DELAYED_HEAD)
                count_mod = 0;
        else if (action == BTRFS_DROP_DELAYED_REF)
                count_mod = -1;

        /*
         * BTRFS_ADD_DELAYED_EXTENT means that we need to update
         * the reserved accounting when the extent is finally added, or
         * if a later modification deletes the delayed ref without ever
         * inserting the extent into the extent allocation tree.
         * ref->must_insert_reserved is the flag used to record
         * that accounting mods are required.
         *
         * Once we record must_insert_reserved, switch the action to
         * BTRFS_ADD_DELAYED_REF because other special casing is not required.
         */
        if (action == BTRFS_ADD_DELAYED_EXTENT)
                must_insert_reserved = 1;
        else
                must_insert_reserved = 0;

        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = count_mod;
        ref->type = 0;
        ref->action = 0;
        ref->is_head = 1;
        ref->in_tree = 1;

        head_ref = btrfs_delayed_node_to_head(ref);
        head_ref->must_insert_reserved = must_insert_reserved;
        head_ref->is_data = is_data;

        INIT_LIST_HEAD(&head_ref->cluster);
        mutex_init(&head_ref->mutex);

        existing = tree_insert(&delayed_refs->root, &ref->rb_node);

        if (existing) {
                update_existing_head_ref(existing, ref);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kfree(ref);
        } else {
                delayed_refs->num_heads++;
                delayed_refs->num_heads_ready++;
                delayed_refs->num_entries++;
                trans->delayed_ref_updates++;
        }
        return 0;
}

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline int add_delayed_tree_ref(struct btrfs_trans_handle *trans,
                                         struct btrfs_delayed_ref_node *ref,
                                         u64 bytenr, u64 num_bytes, u64 parent,
                                         u64 ref_root, int level, int action)
{
        struct btrfs_delayed_ref_node *existing;
        struct btrfs_delayed_tree_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;

        full_ref = btrfs_delayed_node_to_tree_ref(ref);
        if (parent) {
                full_ref->parent = parent;
                ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
        } else {
                full_ref->root = ref_root;
                ref->type = BTRFS_TREE_BLOCK_REF_KEY;
        }
        full_ref->level = level;

        existing = tree_insert(&delayed_refs->root, &ref->rb_node);

        if (existing) {
                update_existing_ref(trans, delayed_refs, existing, ref);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kfree(ref);
        } else {
                delayed_refs->num_entries++;
                trans->delayed_ref_updates++;
        }
        return 0;
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline int add_delayed_data_ref(struct btrfs_trans_handle *trans,
                                         struct btrfs_delayed_ref_node *ref,
                                         u64 bytenr, u64 num_bytes, u64 parent,
                                         u64 ref_root, u64 owner, u64 offset,
                                         int action)
{
        struct btrfs_delayed_ref_node *existing;
        struct btrfs_delayed_data_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;

        full_ref = btrfs_delayed_node_to_data_ref(ref);
        if (parent) {
                full_ref->parent = parent;
                ref->type = BTRFS_SHARED_DATA_REF_KEY;
        } else {
                full_ref->root = ref_root;
                ref->type = BTRFS_EXTENT_DATA_REF_KEY;
        }
        full_ref->objectid = owner;
        full_ref->offset = offset;

        existing = tree_insert(&delayed_refs->root, &ref->rb_node);

        if (existing) {
                update_existing_ref(trans, delayed_refs, existing, ref);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kfree(ref);
        } else {
                delayed_refs->num_entries++;
                trans->delayed_ref_updates++;
        }
        return 0;
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 ref_root, int level, int action,
                               struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_tree_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        int ret;

        BUG_ON(extent_op && extent_op->is_data);
        ref = kmalloc(sizeof(*ref), GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
        if (!head_ref) {
                kfree(ref);
                return -ENOMEM;
        }

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        ret = add_delayed_ref_head(trans, &head_ref->node, bytenr, num_bytes,
                                   action, 0);
        BUG_ON(ret);

        ret = add_delayed_tree_ref(trans, &ref->node, bytenr, num_bytes,
                                   parent, ref_root, level, action);
        BUG_ON(ret);
        spin_unlock(&delayed_refs->lock);
        return 0;
}

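/*
 * Example (illustrative only, names assumed): a caller that just
 * allocated or cow'd a tree block 'buf' might queue the backref
 * roughly like:
 *
 *      ret = btrfs_add_delayed_tree_ref(trans, buf->start, buf->len,
 *                                       parent, root_objectid, level,
 *                                       BTRFS_ADD_DELAYED_EXTENT, NULL);
 */
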
/*
 * add a delayed data ref.  it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes,
                               u64 parent, u64 ref_root,
                               u64 owner, u64 offset, int action,
                               struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_data_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        int ret;

        BUG_ON(extent_op && !extent_op->is_data);
        ref = kmalloc(sizeof(*ref), GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
        if (!head_ref) {
                kfree(ref);
                return -ENOMEM;
        }

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        ret = add_delayed_ref_head(trans, &head_ref->node, bytenr, num_bytes,
                                   action, 1);
        BUG_ON(ret);

        ret = add_delayed_data_ref(trans, &ref->node, bytenr, num_bytes,
                                   parent, ref_root, owner, offset, action);
        BUG_ON(ret);
        spin_unlock(&delayed_refs->lock);
        return 0;
}

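/*
 * record a pending key and/or flags update (an extent_op) against an
 * extent.  Only a head node is inserted; no backref is added or
 * dropped, so the head's ref_mod is left unchanged.
 */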
int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        int ret;

        head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
        if (!head_ref)
                return -ENOMEM;

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        ret = add_delayed_ref_head(trans, &head_ref->node, bytenr,
                                   num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
                                   extent_op->is_data);
        BUG_ON(ret);

        spin_unlock(&delayed_refs->lock);
        return 0;
}

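/*
 * Example (illustrative only): flagging a tree block FULL_BACKREF
 * could look roughly like:
 *
 *      extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
 *      extent_op->flags_to_set = BTRFS_BLOCK_FLAG_FULL_BACKREF;
 *      extent_op->update_flags = 1;
 *      extent_op->update_key = 0;
 *      extent_op->is_data = 0;
 *
 *      ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes,
 *                                        extent_op);
 */
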
/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
        struct btrfs_delayed_ref_node *ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
        if (ref)
                return btrfs_delayed_node_to_head(ref);
        return NULL;
}

/*
 * add a delayed ref to the tree.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 *
 * The main point of this call is to add and remove a backreference in a single
 * shot, taking the lock only once, and only searching for the head node once.
 *
 * It is the same as doing a ref add and delete in two separate calls.
 */
#if 0
int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
                             u64 bytenr, u64 num_bytes, u64 orig_parent,
                             u64 parent, u64 orig_ref_root, u64 ref_root,
                             u64 orig_ref_generation, u64 ref_generation,
                             u64 owner_objectid, int pin)
{
        struct btrfs_delayed_ref *ref;
        struct btrfs_delayed_ref *old_ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        int ret;

        ref = kmalloc(sizeof(*ref), GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        old_ref = kmalloc(sizeof(*old_ref), GFP_NOFS);
        if (!old_ref) {
                kfree(ref);
                return -ENOMEM;
        }

        /*
         * the parent = 0 case comes from cases where we don't actually
         * know the parent yet.  It will get updated later via a add/drop
         * pair.
         */
        if (parent == 0)
                parent = bytenr;
        if (orig_parent == 0)
                orig_parent = bytenr;

        head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
        if (!head_ref) {
                kfree(ref);
                kfree(old_ref);
                return -ENOMEM;
        }
        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        ret = __btrfs_add_delayed_ref(trans, &head_ref->node, bytenr, num_bytes,
                                      (u64)-1, 0, 0, 0,
                                      BTRFS_UPDATE_DELAYED_HEAD, 0);
        BUG_ON(ret);

        ret = __btrfs_add_delayed_ref(trans, &ref->node, bytenr, num_bytes,
                                      parent, ref_root, ref_generation,
                                      owner_objectid, BTRFS_ADD_DELAYED_REF, 0);
        BUG_ON(ret);

        ret = __btrfs_add_delayed_ref(trans, &old_ref->node, bytenr, num_bytes,
                                      orig_parent, orig_ref_root,
                                      orig_ref_generation, owner_objectid,
                                      BTRFS_DROP_DELAYED_REF, pin);
        BUG_ON(ret);
        spin_unlock(&delayed_refs->lock);
        return 0;
}
#endif