delayed-ref.c

/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"

/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

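/*
 * Every extent with pending updates is represented by a single head node
 * (struct btrfs_delayed_ref_head) plus individual ref nodes describing
 * each change.  Heads and refs share one rb tree, keyed so that a head
 * sorts after all of the refs for its bytenr (see comp_entry() below).
 */
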
/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * entries in the rb tree are ordered by the byte number of the extent,
 * type of the delayed backrefs and content of delayed backrefs.
 */
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
		      struct btrfs_delayed_ref_node *ref1)
{
	if (ref1->bytenr < ref2->bytenr)
		return -1;
	if (ref1->bytenr > ref2->bytenr)
		return 1;
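	/* there is exactly one head per bytenr, and it sorts after its refs */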
	if (ref1->is_head && ref2->is_head)
		return 0;
	if (ref2->is_head)
		return -1;
	if (ref1->is_head)
		return 1;
	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	/* merging of sequenced refs is not allowed */
	if (ref1->seq < ref2->seq)
		return -1;
	if (ref1->seq > ref2->seq)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
				      btrfs_delayed_node_to_tree_ref(ref1));
	} else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
		   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
		return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
				      btrfs_delayed_node_to_data_ref(ref1));
	}
	BUG();
	return 0;
}

/*
 * insert a new ref into the rbtree.  This returns any existing refs
 * for the same (bytenr,parent) tuple, or NULL if the new node was properly
 * inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
						  struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	struct btrfs_delayed_ref_node *ins;
	int cmp;

	ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 rb_node);

		cmp = comp_entry(entry, ins);
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * find a head entry based on bytenr.  This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If 'last' is non-NULL, it is set to the last entry examined during
 * the search, whether or not a matching head was found.
 */
static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
				    u64 bytenr,
				    struct btrfs_delayed_ref_node **last)
{
	struct rb_node *n = root->rb_node;
	struct btrfs_delayed_ref_node *entry;
	int cmp;

	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		WARN_ON(!entry->in_tree);
		if (last)
			*last = entry;

		if (bytenr < entry->bytenr)
			cmp = -1;
		else if (bytenr > entry->bytenr)
			cmp = 1;
		else if (!btrfs_delayed_ref_is_head(entry))
			cmp = 1;
		else
			cmp = 0;

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}

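/*
 * Try to take the mutex on a delayed ref head.  Must be called with the
 * delayed_refs spinlock held.  If the mutex is contended we drop the
 * spinlock while sleeping on it and retake it afterwards; if the head
 * was removed from the tree in the meantime, -EAGAIN is returned and
 * the caller must redo its search.  Returns 0 with head->mutex held on
 * success.
 */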
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}

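/*
 * Check whether a delayed ref with the given seq must be held back.
 * Entries on delayed_refs->seq_head record sequence numbers that are
 * still in use elsewhere; if 'seq' is at or beyond the lowest of those,
 * processing has to wait.  Must be called with the delayed_refs
 * spinlock held.  Returns 1 to hold the ref back, 0 otherwise.
 */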
int btrfs_check_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;

	assert_spin_locked(&delayed_refs->lock);
	if (list_empty(&delayed_refs->seq_head))
		return 0;

	elem = list_first_entry(&delayed_refs->seq_head, struct seq_list, list);
	if (seq >= elem->seq) {
		pr_debug("holding back delayed_ref %llu, lowest is %llu (%p)\n",
			 seq, elem->seq, delayed_refs);
		return 1;
	}
	return 0;
}

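/*
 * Collect a cluster of up to 32 delayed ref heads, starting at 'start',
 * onto 'cluster' for processing.  The scan stops early when it runs into
 * a head that is already on someone else's cluster, since the goal is to
 * batch heads that are likely to land in the same extent tree leaf.
 * Returns 0 if at least one head was found, 1 if none were.
 */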
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
			   struct list_head *cluster, u64 start)
{
	int count = 0;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *head;

	delayed_refs = &trans->transaction->delayed_refs;
	if (start == 0) {
		node = rb_first(&delayed_refs->root);
	} else {
		ref = NULL;
		find_ref_head(&delayed_refs->root, start, &ref);
		if (ref) {
			struct btrfs_delayed_ref_node *tmp;

			node = rb_prev(&ref->rb_node);
			while (node) {
				tmp = rb_entry(node,
					       struct btrfs_delayed_ref_node,
					       rb_node);
				if (tmp->bytenr < start)
					break;
				ref = tmp;
				node = rb_prev(&ref->rb_node);
			}
			node = &ref->rb_node;
		} else
			node = rb_first(&delayed_refs->root);
	}
again:
	while (node && count < 32) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (btrfs_delayed_ref_is_head(ref)) {
			head = btrfs_delayed_node_to_head(ref);
			if (list_empty(&head->cluster)) {
				list_add_tail(&head->cluster, cluster);
				delayed_refs->run_delayed_start =
					head->node.bytenr;
				count++;
				WARN_ON(delayed_refs->num_heads_ready == 0);
				delayed_refs->num_heads_ready--;
			} else if (count) {
				/* the goal of the clustering is to find extents
				 * that are likely to end up in the same extent
				 * leaf on disk.  So, we don't want them spread
				 * all over the tree.  Stop now if we've hit
				 * a head that was already in use
				 */
				break;
			}
		}
		node = rb_next(node);
	}
	if (count) {
		return 0;
	} else if (start) {
		/*
		 * we've gone to the end of the rbtree without finding any
		 * clusters.  start from the beginning and try again
		 */
		start = 0;
		node = rb_first(&delayed_refs->root);
		goto again;
	}
	return 1;
}

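/*
 * Usage sketch (illustrative only, loosely modeled on the delayed ref
 * processing loop in extent-tree.c; not part of the original file):
 *
 *	struct list_head cluster;
 *
 *	INIT_LIST_HEAD(&cluster);
 *	spin_lock(&delayed_refs->lock);
 *	while (!btrfs_find_ref_cluster(trans, &cluster,
 *				       delayed_refs->run_delayed_start)) {
 *		... lock and run each head on 'cluster', then empty it ...
 *	}
 *	spin_unlock(&delayed_refs->lock);
 */
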
/*
 * helper function to update an extent delayed ref in the
 * rbtree.  existing and update must both have the same
 * bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
		    struct btrfs_delayed_ref_root *delayed_refs,
		    struct btrfs_delayed_ref_node *existing,
		    struct btrfs_delayed_ref_node *update)
{
	if (update->action != existing->action) {
		/*
		 * this is effectively undoing either an add or a
		 * drop.  We decrement the ref_mod, and if it goes
		 * down to zero we just delete the entry without
		 * ever changing the extent allocation tree.
		 */
		existing->ref_mod--;
		if (existing->ref_mod == 0) {
			rb_erase(&existing->rb_node,
				 &delayed_refs->root);
			existing->in_tree = 0;
			btrfs_put_delayed_ref(existing);
			delayed_refs->num_entries--;
			if (trans->delayed_ref_updates)
				trans->delayed_ref_updates--;
		} else {
			WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
				existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	} else {
		WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
			existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		/*
		 * the action on the existing ref matches
		 * the action on the ref we're trying to add.
		 * Bump the ref_mod by one so the backref that
		 * is eventually added/removed has the correct
		 * reference count
		 */
		existing->ref_mod += update->ref_mod;
	}
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;
	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = 1;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = 1;
			}
			kfree(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation
	 */
	existing->ref_mod += update->ref_mod;
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline int add_delayed_ref_head(struct btrfs_fs_info *fs_info,
					 struct btrfs_trans_handle *trans,
					 struct btrfs_delayed_ref_node *ref,
					 u64 bytenr, u64 num_bytes,
					 int action, int is_data)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;

	INIT_LIST_HEAD(&head_ref->cluster);
	mutex_init(&head_ref->mutex);

	trace_btrfs_delayed_ref_head(ref, head_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_head_ref(existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(ref);
	} else {
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
	return 0;
}

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline int add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
					 struct btrfs_trans_handle *trans,
					 struct btrfs_delayed_ref_node *ref,
					 u64 bytenr, u64 num_bytes, u64 parent,
					 u64 ref_root, int level, int action,
					 int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
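	/*
	 * sequenced refs must not be merged (see comp_entry()); the seq
	 * lets concurrent readers of the ref tree hold back processing
	 * via btrfs_check_delayed_seq().  need_ref_seq() and
	 * inc_delayed_seq() are helpers defined alongside this code in
	 * delayed-ref.h.
	 */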
	if (need_ref_seq(for_cow, ref_root))
		seq = inc_delayed_seq(delayed_refs);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_btrfs_delayed_tree_ref(ref, full_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
	return 0;
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline int add_delayed_data_ref(struct btrfs_fs_info *fs_info,
					 struct btrfs_trans_handle *trans,
					 struct btrfs_delayed_ref_node *ref,
					 u64 bytenr, u64 num_bytes, u64 parent,
					 u64 ref_root, u64 owner, u64 offset,
					 int action, int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (need_ref_seq(for_cow, ref_root))
		seq = inc_delayed_seq(delayed_refs);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;
	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_btrfs_delayed_data_ref(ref, full_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
	return 0;
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
				   num_bytes, action, 0);
	BUG_ON(ret);

	ret = add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
				   num_bytes, parent, ref_root, level, action,
				   for_cow);
	BUG_ON(ret);
	spin_unlock(&delayed_refs->lock);
	return 0;
}

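/*
 * Usage sketch (illustrative only, not part of the original file):
 * dropping one reference to a tree block would be queued roughly as
 *
 *	ret = btrfs_add_delayed_tree_ref(fs_info, trans, buf->start,
 *					 buf->len, parent,
 *					 root->root_key.objectid, level,
 *					 BTRFS_DROP_DELAYED_REF, NULL,
 *					 for_cow);
 *
 * where 'buf' is the extent_buffer being dropped; the queued ref is
 * then processed before the transaction commits.
 */
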
/*
 * add a delayed data ref.  it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
				   num_bytes, action, 1);
	BUG_ON(ret);

	ret = add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
				   num_bytes, parent, ref_root, owner, offset,
				   action, for_cow);
	BUG_ON(ret);
	spin_unlock(&delayed_refs->lock);
	return 0;
}

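/*
 * Record a pending extent_op (a flags and/or key update) against an
 * extent without changing its reference count: the head is inserted
 * with BTRFS_UPDATE_DELAYED_HEAD, which leaves ref_mod untouched, and
 * the extent_op is applied when the head is run.
 */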
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
				   num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
				   extent_op->is_data);
	BUG_ON(ret);

	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
	if (ref)
		return btrfs_delayed_node_to_head(ref);
	return NULL;
}

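/*
 * Usage sketch (illustrative only, not part of the original file): how
 * a caller might look up and lock the head for the extent at 'bytenr':
 *
 *	spin_lock(&delayed_refs->lock);
 *	head = btrfs_find_delayed_ref_head(trans, bytenr);
 *	if (head && !btrfs_delayed_ref_lock(trans, head)) {
 *		... inspect the pending refs for this extent ...
 *		mutex_unlock(&head->mutex);
 *	}
 *	spin_unlock(&delayed_refs->lock);
 *
 * Note that btrfs_delayed_ref_lock() may drop and retake the spinlock;
 * on -EAGAIN the head went away and the search must be redone.
 */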