/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"

#define BTRFS_ROOT_TRANS_TAG 0

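/*
 * drop a reference on a transaction.  When the use count hits zero the
 * transaction is removed from the global list and freed; the memset is
 * there to help catch any stale users of the freed struct.
 */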
static noinline void put_transaction(struct btrfs_transaction *transaction)
{
        WARN_ON(transaction->use_count == 0);
        transaction->use_count--;
        if (transaction->use_count == 0) {
                list_del_init(&transaction->list);
                memset(transaction, 0, sizeof(*transaction));
                kmem_cache_free(btrfs_transaction_cachep, transaction);
        }
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans;

        cur_trans = root->fs_info->running_transaction;
        if (!cur_trans) {
                cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
                                             GFP_NOFS);
                BUG_ON(!cur_trans);
                root->fs_info->generation++;
                cur_trans->num_writers = 1;
                cur_trans->num_joined = 0;
                cur_trans->transid = root->fs_info->generation;
                init_waitqueue_head(&cur_trans->writer_wait);
                init_waitqueue_head(&cur_trans->commit_wait);
                cur_trans->in_commit = 0;
                cur_trans->blocked = 0;
                cur_trans->use_count = 1;
                cur_trans->commit_done = 0;
                cur_trans->start_time = get_seconds();

                cur_trans->delayed_refs.root.rb_node = NULL;
                cur_trans->delayed_refs.num_entries = 0;
                cur_trans->delayed_refs.num_heads_ready = 0;
                cur_trans->delayed_refs.num_heads = 0;
                cur_trans->delayed_refs.flushing = 0;
                cur_trans->delayed_refs.run_delayed_start = 0;
                spin_lock_init(&cur_trans->delayed_refs.lock);

                INIT_LIST_HEAD(&cur_trans->pending_snapshots);
                list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
                extent_io_tree_init(&cur_trans->dirty_pages,
                                    root->fs_info->btree_inode->i_mapping,
                                    GFP_NOFS);
                spin_lock(&root->fs_info->new_trans_lock);
                root->fs_info->running_transaction = cur_trans;
                spin_unlock(&root->fs_info->new_trans_lock);
        } else {
                cur_trans->num_writers++;
                cur_trans->num_joined++;
        }

        return 0;
}

/*
 * this does all the record keeping required to make sure that a
 * reference-counted root is properly recorded in a given transaction.
 * This is required to make sure the old root from before we joined the
 * transaction is deleted when the transaction commits
 */
static noinline int record_root_in_trans(struct btrfs_trans_handle *trans,
                                         struct btrfs_root *root)
{
        if (root->ref_cows && root->last_trans < trans->transid) {
                WARN_ON(root == root->fs_info->extent_root);
                WARN_ON(root->root_item.refs == 0);
                WARN_ON(root->commit_root != root->node);

                radix_tree_tag_set(&root->fs_info->fs_roots_radix,
                                   (unsigned long)root->root_key.objectid,
                                   BTRFS_ROOT_TRANS_TAG);
                root->last_trans = trans->transid;
                btrfs_init_reloc_root(trans, root);
        }
        return 0;
}

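/*
 * locked wrapper around record_root_in_trans: takes trans_mutex and skips
 * the work entirely when the root has already been recorded in this
 * transaction.
 */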
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        if (!root->ref_cows)
                return 0;

        mutex_lock(&root->fs_info->trans_mutex);
        if (root->last_trans == trans->transid) {
                mutex_unlock(&root->fs_info->trans_mutex);
                return 0;
        }

        record_root_in_trans(trans, root);
        mutex_unlock(&root->fs_info->trans_mutex);
        return 0;
}

/*
 * wait for a commit against the current transaction to become unblocked.
 * When this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans;

        cur_trans = root->fs_info->running_transaction;
        if (cur_trans && cur_trans->blocked) {
                DEFINE_WAIT(wait);
                cur_trans->use_count++;
                while (1) {
                        prepare_to_wait(&root->fs_info->transaction_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (cur_trans->blocked) {
                                mutex_unlock(&root->fs_info->trans_mutex);
                                schedule();
                                mutex_lock(&root->fs_info->trans_mutex);
                                finish_wait(&root->fs_info->transaction_wait,
                                            &wait);
                        } else {
                                finish_wait(&root->fs_info->transaction_wait,
                                            &wait);
                                break;
                        }
                }
                put_transaction(cur_trans);
        }
}

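/*
 * common helper behind the btrfs_*_transaction variants below.  wait == 1
 * blocks on a pending commit unless a userland transaction ioctl is open,
 * wait == 2 always blocks (used by the ioctl itself), and wait == 0 joins
 * the running transaction without waiting.  Log replay skips the wait
 * entirely.
 */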
static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
                                                    int num_blocks, int wait)
{
        struct btrfs_trans_handle *h =
                kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
        int ret;

        mutex_lock(&root->fs_info->trans_mutex);
        if (!root->fs_info->log_root_recovering &&
            ((wait == 1 && !root->fs_info->open_ioctl_trans) || wait == 2))
                wait_current_trans(root);
        ret = join_transaction(root);
        BUG_ON(ret);

        h->transid = root->fs_info->running_transaction->transid;
        h->transaction = root->fs_info->running_transaction;
        h->blocks_reserved = num_blocks;
        h->blocks_used = 0;
        h->block_group = 0;
        h->alloc_exclude_nr = 0;
        h->alloc_exclude_start = 0;
        h->delayed_ref_updates = 0;

        root->fs_info->running_transaction->use_count++;
        record_root_in_trans(h, root);
        mutex_unlock(&root->fs_info->trans_mutex);
        return h;
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
                                                   int num_blocks)
{
        return start_transaction(root, num_blocks, 1);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
                                                  int num_blocks)
{
        return start_transaction(root, num_blocks, 0);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
                                                          int num_blocks)
{
        return start_transaction(r, num_blocks, 2);
}

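/*
 * Illustrative usage sketch (not part of this file): a typical caller
 * brackets its btree modifications with a start/end pair, for example:
 *
 *        trans = btrfs_start_transaction(root, 1);
 *        ret = btrfs_update_root(trans, root->fs_info->tree_root,
 *                                &root->root_key, &root->root_item);
 *        btrfs_end_transaction(trans, root);
 *
 * btrfs_update_root is just one possible body; any tree operation that
 * takes a trans handle follows the same pattern.
 */
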
/* wait for a transaction commit to be fully complete */
static noinline int wait_for_commit(struct btrfs_root *root,
                                    struct btrfs_transaction *commit)
{
        DEFINE_WAIT(wait);
        mutex_lock(&root->fs_info->trans_mutex);
        while (!commit->commit_done) {
                prepare_to_wait(&commit->commit_wait, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (commit->commit_done)
                        break;
                mutex_unlock(&root->fs_info->trans_mutex);
                schedule();
                mutex_lock(&root->fs_info->trans_mutex);
        }
        mutex_unlock(&root->fs_info->trans_mutex);
        finish_wait(&commit->commit_wait, &wait);
        return 0;
}

#if 0
/*
 * rate limit against the drop_snapshot code.  This helps to slow down new
 * operations if the drop_snapshot code isn't able to keep up.
 */
static void throttle_on_drops(struct btrfs_root *root)
{
        struct btrfs_fs_info *info = root->fs_info;
        int harder_count = 0;

harder:
        if (atomic_read(&info->throttles)) {
                DEFINE_WAIT(wait);
                int thr;
                thr = atomic_read(&info->throttle_gen);

                do {
                        prepare_to_wait(&info->transaction_throttle,
                                        &wait, TASK_UNINTERRUPTIBLE);
                        if (!atomic_read(&info->throttles)) {
                                finish_wait(&info->transaction_throttle, &wait);
                                break;
                        }
                        schedule();
                        finish_wait(&info->transaction_throttle, &wait);
                } while (thr == atomic_read(&info->throttle_gen));
                harder_count++;

                if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 &&
                    harder_count < 2)
                        goto harder;

                if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 &&
                    harder_count < 10)
                        goto harder;

                if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 &&
                    harder_count < 20)
                        goto harder;
        }
}
#endif

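/*
 * throttle a caller behind a pending commit: unless a userland transaction
 * ioctl holds the transaction open, block until the currently committing
 * transaction unblocks.
 */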
void btrfs_throttle(struct btrfs_root *root)
{
        mutex_lock(&root->fs_info->trans_mutex);
        if (!root->fs_info->open_ioctl_trans)
                wait_current_trans(root);
        mutex_unlock(&root->fs_info->trans_mutex);
}

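/*
 * common exit path for btrfs_end_transaction and
 * btrfs_end_transaction_throttle: run a few batches of any delayed ref
 * updates accumulated on this handle, then drop our writer count and our
 * reference on the transaction.  The throttle argument is unused in this
 * version of the code.
 */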
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root, int throttle)
{
        struct btrfs_transaction *cur_trans;
        struct btrfs_fs_info *info = root->fs_info;
        int count = 0;

        while (count < 4) {
                unsigned long cur = trans->delayed_ref_updates;
                trans->delayed_ref_updates = 0;
                if (cur &&
                    trans->transaction->delayed_refs.num_heads_ready > 64) {
                        trans->delayed_ref_updates = 0;

                        /*
                         * do a full flush if the transaction is trying
                         * to close
                         */
                        if (trans->transaction->delayed_refs.flushing)
                                cur = 0;
                        btrfs_run_delayed_refs(trans, root, cur);
                } else {
                        break;
                }
                count++;
        }

        mutex_lock(&info->trans_mutex);
        cur_trans = info->running_transaction;
        WARN_ON(cur_trans != trans->transaction);
        WARN_ON(cur_trans->num_writers < 1);
        cur_trans->num_writers--;

        if (waitqueue_active(&cur_trans->writer_wait))
                wake_up(&cur_trans->writer_wait);
        put_transaction(cur_trans);
        mutex_unlock(&info->trans_mutex);
        memset(trans, 0, sizeof(*trans));
        kmem_cache_free(btrfs_trans_handle_cachep, trans);
        return 0;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root)
{
        return __btrfs_end_transaction(trans, root, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
{
        return __btrfs_end_transaction(trans, root, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
                                        struct extent_io_tree *dirty_pages)
{
        int ret;
        int err = 0;
        int werr = 0;
        struct page *page;
        struct inode *btree_inode = root->fs_info->btree_inode;
        u64 start = 0;
        u64 end;
        unsigned long index;

        while (1) {
                ret = find_first_extent_bit(dirty_pages, start, &start, &end,
                                            EXTENT_DIRTY);
                if (ret)
                        break;
                while (start <= end) {
                        cond_resched();

                        index = start >> PAGE_CACHE_SHIFT;
                        start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
                        page = find_get_page(btree_inode->i_mapping, index);
                        if (!page)
                                continue;

                        btree_lock_page_hook(page);
                        if (!page->mapping) {
                                unlock_page(page);
                                page_cache_release(page);
                                continue;
                        }

                        if (PageWriteback(page)) {
                                if (PageDirty(page))
                                        wait_on_page_writeback(page);
                                else {
                                        unlock_page(page);
                                        page_cache_release(page);
                                        continue;
                                }
                        }
                        err = write_one_page(page, 0);
                        if (err)
                                werr = err;
                        page_cache_release(page);
                }
        }
        while (1) {
                ret = find_first_extent_bit(dirty_pages, 0, &start, &end,
                                            EXTENT_DIRTY);
                if (ret)
                        break;

                clear_extent_dirty(dirty_pages, start, end, GFP_NOFS);
                while (start <= end) {
                        index = start >> PAGE_CACHE_SHIFT;
                        start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
                        page = find_get_page(btree_inode->i_mapping, index);
                        if (!page)
                                continue;
                        if (PageDirty(page)) {
                                btree_lock_page_hook(page);
                                wait_on_page_writeback(page);
                                err = write_one_page(page, 0);
                                if (err)
                                        werr = err;
                        }
                        wait_on_page_writeback(page);
                        page_cache_release(page);
                        cond_resched();
                }
        }
        if (err)
                werr = err;
        return werr;
}

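/*
 * write out and wait on every dirty btree block in the given transaction.
 * With no transaction handle this falls back to flushing the whole btree
 * inode mapping.
 */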
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root)
{
        if (!trans || !trans->transaction) {
                struct inode *btree_inode;
                btree_inode = root->fs_info->btree_inode;
                return filemap_write_and_wait(btree_inode->i_mapping);
        }
        return btrfs_write_and_wait_marked_extents(root,
                                           &trans->transaction->dirty_pages);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        int ret;
        u64 old_root_bytenr;
        struct btrfs_root *tree_root = root->fs_info->tree_root;

        btrfs_write_dirty_block_groups(trans, root);
        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        BUG_ON(ret);

        while (1) {
                old_root_bytenr = btrfs_root_bytenr(&root->root_item);
                if (old_root_bytenr == root->node->start)
                        break;

                btrfs_set_root_node(&root->root_item, root->node);
                ret = btrfs_update_root(trans, tree_root,
                                        &root->root_key,
                                        &root->root_item);
                BUG_ON(ret);

                btrfs_write_dirty_block_groups(trans, root);
                ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
                BUG_ON(ret);
        }
        free_extent_buffer(root->commit_root);
        root->commit_root = btrfs_root_node(root);
        return 0;
}

/*
 * update all the cowonly tree roots on disk
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
                                         struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct list_head *next;
        struct extent_buffer *eb;
        int ret;

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        BUG_ON(ret);

        eb = btrfs_lock_root_node(fs_info->tree_root);
        btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb);
        btrfs_tree_unlock(eb);
        free_extent_buffer(eb);

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        BUG_ON(ret);

        while (!list_empty(&fs_info->dirty_cowonly_roots)) {
                next = fs_info->dirty_cowonly_roots.next;
                list_del_init(next);
                root = list_entry(next, struct btrfs_root, dirty_list);

                update_cowonly_root(trans, root);
                ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
                BUG_ON(ret);
        }
        return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This adds the
 * given root onto the list of dead roots so it gets deleted later.
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
        mutex_lock(&root->fs_info->trans_mutex);
        list_add(&root->root_list, &root->fs_info->dead_roots);
        mutex_unlock(&root->fs_info->trans_mutex);
        return 0;
}

/*
 * update all the fs tree roots that were changed in this transaction:
 * free their log trees, update their reloc roots, and write the new
 * root items into the tree of tree roots
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root)
{
        struct btrfs_root *gang[8];
        struct btrfs_fs_info *fs_info = root->fs_info;
        int i;
        int ret;
        int err = 0;

        while (1) {
                ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
                                                 (void **)gang, 0,
                                                 ARRAY_SIZE(gang),
                                                 BTRFS_ROOT_TRANS_TAG);
                if (ret == 0)
                        break;
                for (i = 0; i < ret; i++) {
                        root = gang[i];
                        radix_tree_tag_clear(&fs_info->fs_roots_radix,
                                        (unsigned long)root->root_key.objectid,
                                        BTRFS_ROOT_TRANS_TAG);

                        btrfs_free_log(trans, root);
                        btrfs_update_reloc_root(trans, root);

                        if (root->commit_root != root->node) {
                                free_extent_buffer(root->commit_root);
                                root->commit_root = btrfs_root_node(root);
                                btrfs_set_root_node(&root->root_item,
                                                    root->node);
                        }

                        err = btrfs_update_root(trans, fs_info->tree_root,
                                                &root->root_key,
                                                &root->root_item);
                        if (err)
                                break;
                }
        }
        return err;
}

/*
 * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
        struct btrfs_fs_info *info = root->fs_info;
        int ret;
        struct btrfs_trans_handle *trans;
        unsigned long nr;

        smp_mb();
        if (root->defrag_running)
                return 0;
        trans = btrfs_start_transaction(root, 1);
        while (1) {
                root->defrag_running = 1;
                ret = btrfs_defrag_leaves(trans, root, cacheonly);
                nr = trans->blocks_used;
                btrfs_end_transaction(trans, root);
                btrfs_btree_balance_dirty(info->tree_root, nr);
                cond_resched();

                trans = btrfs_start_transaction(root, 1);
                if (root->fs_info->closing || ret != -EAGAIN)
                        break;
        }
        root->defrag_running = 0;
        smp_mb();
        btrfs_end_transaction(trans, root);
        return 0;
}

/*
 * when dropping snapshots, we generate a ton of delayed refs, and it makes
 * sense not to join the transaction while it is trying to flush the current
 * queue of delayed refs out.
 *
 * This is used by the drop snapshot code only
 */
static noinline int wait_transaction_pre_flush(struct btrfs_fs_info *info)
{
        DEFINE_WAIT(wait);

        mutex_lock(&info->trans_mutex);
        while (info->running_transaction &&
               info->running_transaction->delayed_refs.flushing) {
                prepare_to_wait(&info->transaction_wait, &wait,
                                TASK_UNINTERRUPTIBLE);
                mutex_unlock(&info->trans_mutex);

                schedule();

                mutex_lock(&info->trans_mutex);
                finish_wait(&info->transaction_wait, &wait);
        }
        mutex_unlock(&info->trans_mutex);
        return 0;
}

/*
 * Given a single dead root, repeatedly call btrfs_drop_snapshot on it in
 * small transactions until the whole snapshot is gone, then delete its
 * root item from the tree of tree roots
 */
int btrfs_drop_dead_root(struct btrfs_root *root)
{
        struct btrfs_trans_handle *trans;
        struct btrfs_root *tree_root = root->fs_info->tree_root;
        unsigned long nr;
        int ret;

        while (1) {
                /*
                 * we don't want to jump in and create a bunch of
                 * delayed refs if the transaction is starting to close
                 */
                wait_transaction_pre_flush(tree_root->fs_info);
                trans = btrfs_start_transaction(tree_root, 1);

                /*
                 * we've joined a transaction, make sure it isn't
                 * closing right now
                 */
                if (trans->transaction->delayed_refs.flushing) {
                        btrfs_end_transaction(trans, tree_root);
                        continue;
                }

                ret = btrfs_drop_snapshot(trans, root);
                if (ret != -EAGAIN)
                        break;

                ret = btrfs_update_root(trans, tree_root,
                                        &root->root_key,
                                        &root->root_item);
                if (ret)
                        break;

                nr = trans->blocks_used;
                ret = btrfs_end_transaction(trans, tree_root);
                BUG_ON(ret);

                btrfs_btree_balance_dirty(tree_root, nr);
                cond_resched();
        }
        BUG_ON(ret);

        ret = btrfs_del_root(trans, tree_root, &root->root_key);
        BUG_ON(ret);

        nr = trans->blocks_used;
        ret = btrfs_end_transaction(trans, tree_root);
        BUG_ON(ret);

        free_extent_buffer(root->node);
        free_extent_buffer(root->commit_root);
        kfree(root);

        btrfs_btree_balance_dirty(tree_root, nr);
        return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
                                   struct btrfs_fs_info *fs_info,
                                   struct btrfs_pending_snapshot *pending)
{
        struct btrfs_key key;
        struct btrfs_root_item *new_root_item;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *root = pending->root;
        struct extent_buffer *tmp;
        struct extent_buffer *old;
        int ret;
        u64 objectid;

        new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
        if (!new_root_item) {
                ret = -ENOMEM;
                goto fail;
        }
        ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid);
        if (ret)
                goto fail;

        record_root_in_trans(trans, root);
        btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
        memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));

        key.objectid = objectid;
        key.offset = 0;
        btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);

        old = btrfs_lock_root_node(root);
        btrfs_cow_block(trans, root, old, NULL, 0, &old);
        btrfs_set_lock_blocking(old);

        btrfs_copy_root(trans, root, old, &tmp, objectid);
        btrfs_tree_unlock(old);
        free_extent_buffer(old);

        btrfs_set_root_node(new_root_item, tmp);
        ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
                                new_root_item);
        btrfs_tree_unlock(tmp);
        free_extent_buffer(tmp);
        if (ret)
                goto fail;

        key.offset = (u64)-1;
        memcpy(&pending->root_key, &key, sizeof(key));
fail:
        kfree(new_root_item);
        return ret;
}

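/*
 * second phase of snapshot creation, run after the commit is safely on
 * disk: wire the new root into the namespace by inserting the directory
 * item and the root backref/forward ref pair, then instantiate the dentry.
 */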
static noinline int finish_pending_snapshot(struct btrfs_fs_info *fs_info,
                                   struct btrfs_pending_snapshot *pending)
{
        int ret;
        int namelen;
        u64 index = 0;
        struct btrfs_trans_handle *trans;
        struct inode *parent_inode;
        struct inode *inode;
        struct btrfs_root *parent_root;

        parent_inode = pending->dentry->d_parent->d_inode;
        parent_root = BTRFS_I(parent_inode)->root;
        trans = btrfs_join_transaction(parent_root, 1);

        /*
         * insert the directory item
         */
        namelen = strlen(pending->name);
        ret = btrfs_set_inode_index(parent_inode, &index);
        ret = btrfs_insert_dir_item(trans, parent_root,
                                    pending->name, namelen,
                                    parent_inode->i_ino,
                                    &pending->root_key, BTRFS_FT_DIR, index);
        if (ret)
                goto fail;

        btrfs_i_size_write(parent_inode, parent_inode->i_size + namelen * 2);
        ret = btrfs_update_inode(trans, parent_root, parent_inode);
        BUG_ON(ret);

        /* add the backref first */
        ret = btrfs_add_root_ref(trans, parent_root->fs_info->tree_root,
                                 pending->root_key.objectid,
                                 BTRFS_ROOT_BACKREF_KEY,
                                 parent_root->root_key.objectid,
                                 parent_inode->i_ino, index, pending->name,
                                 namelen);
        BUG_ON(ret);

        /* now add the forward ref */
        ret = btrfs_add_root_ref(trans, parent_root->fs_info->tree_root,
                                 parent_root->root_key.objectid,
                                 BTRFS_ROOT_REF_KEY,
                                 pending->root_key.objectid,
                                 parent_inode->i_ino, index, pending->name,
                                 namelen);

        inode = btrfs_lookup_dentry(parent_inode, pending->dentry);
        d_instantiate(pending->dentry, inode);
fail:
        btrfs_end_transaction(trans, fs_info->fs_root);
        return ret;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
                                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_pending_snapshot *pending;
        struct list_head *head = &trans->transaction->pending_snapshots;
        int ret;

        list_for_each_entry(pending, head, list) {
                ret = create_pending_snapshot(trans, fs_info, pending);
                BUG_ON(ret);
        }
        return 0;
}

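/*
 * finish off (and free) every pending snapshot once the new roots are
 * safely on disk
 */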
static noinline int finish_pending_snapshots(struct btrfs_trans_handle *trans,
                                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_pending_snapshot *pending;
        struct list_head *head = &trans->transaction->pending_snapshots;
        int ret;

        while (!list_empty(head)) {
                pending = list_entry(head->next,
                                     struct btrfs_pending_snapshot, list);
                ret = finish_pending_snapshot(fs_info, pending);
                BUG_ON(ret);
                list_del(&pending->list);
                kfree(pending->name);
                kfree(pending);
        }
        return 0;
}

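/*
 * copy the current chunk root and tree root pointers (bytenr, generation,
 * level) from their root items into the in-memory super block, so the
 * next super block write points at the new trees.
 */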
static void update_super_roots(struct btrfs_root *root)
{
        struct btrfs_root_item *root_item;
        struct btrfs_super_block *super;

        super = &root->fs_info->super_copy;

        root_item = &root->fs_info->chunk_root->root_item;
        super->chunk_root = root_item->bytenr;
        super->chunk_root_generation = root_item->generation;
        super->chunk_root_level = root_item->level;

        root_item = &root->fs_info->tree_root->root_item;
        super->root = root_item->bytenr;
        super->generation = root_item->generation;
        super->root_level = root_item->level;
}

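/*
 * commit the running transaction.  Roughly: flush the delayed refs, mark
 * the transaction blocked and wait for the other writers to finish, create
 * pending snapshots, commit the fs and cowonly roots, write out the dirty
 * btree blocks and the super block, and finally wake anyone waiting on the
 * commit.  Only one committer runs at a time; late arrivals just wait for
 * the in-progress commit to finish.
 */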
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root)
{
        unsigned long joined = 0;
        unsigned long timeout = 1;
        struct btrfs_transaction *cur_trans;
        struct btrfs_transaction *prev_trans = NULL;
        struct extent_io_tree *pinned_copy;
        DEFINE_WAIT(wait);
        int ret;
        int should_grow = 0;
        unsigned long now = get_seconds();
        int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);

        btrfs_run_ordered_operations(root, 0);

        /* make a pass through all the delayed refs we have so far
         * any running procs may add more while we are here
         */
        ret = btrfs_run_delayed_refs(trans, root, 0);
        BUG_ON(ret);

        cur_trans = trans->transaction;
        /*
         * set the flushing flag so procs in this transaction have to
         * start sending their work down.
         */
        cur_trans->delayed_refs.flushing = 1;

        ret = btrfs_run_delayed_refs(trans, root, 0);
        BUG_ON(ret);

        mutex_lock(&root->fs_info->trans_mutex);
        if (cur_trans->in_commit) {
                cur_trans->use_count++;
                mutex_unlock(&root->fs_info->trans_mutex);
                btrfs_end_transaction(trans, root);

                ret = wait_for_commit(root, cur_trans);
                BUG_ON(ret);

                mutex_lock(&root->fs_info->trans_mutex);
                put_transaction(cur_trans);
                mutex_unlock(&root->fs_info->trans_mutex);

                return 0;
        }

        pinned_copy = kmalloc(sizeof(*pinned_copy), GFP_NOFS);
        if (!pinned_copy)
                return -ENOMEM;

        extent_io_tree_init(pinned_copy,
                            root->fs_info->btree_inode->i_mapping, GFP_NOFS);

        trans->transaction->in_commit = 1;
        trans->transaction->blocked = 1;
        if (cur_trans->list.prev != &root->fs_info->trans_list) {
                prev_trans = list_entry(cur_trans->list.prev,
                                        struct btrfs_transaction, list);
                if (!prev_trans->commit_done) {
                        prev_trans->use_count++;
                        mutex_unlock(&root->fs_info->trans_mutex);

                        wait_for_commit(root, prev_trans);

                        mutex_lock(&root->fs_info->trans_mutex);
                        put_transaction(prev_trans);
                }
        }

        if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
                should_grow = 1;

        do {
                int snap_pending = 0;
                joined = cur_trans->num_joined;
                if (!list_empty(&trans->transaction->pending_snapshots))
                        snap_pending = 1;

                WARN_ON(cur_trans != trans->transaction);
                prepare_to_wait(&cur_trans->writer_wait, &wait,
                                TASK_UNINTERRUPTIBLE);

                if (cur_trans->num_writers > 1)
                        timeout = MAX_SCHEDULE_TIMEOUT;
                else if (should_grow)
                        timeout = 1;

                mutex_unlock(&root->fs_info->trans_mutex);

                if (flush_on_commit || snap_pending) {
                        if (flush_on_commit)
                                btrfs_start_delalloc_inodes(root);
                        ret = btrfs_wait_ordered_extents(root, 1);
                        BUG_ON(ret);
                }

                /*
                 * rename doesn't use btrfs_join_transaction, so once we
                 * set the transaction to blocked above, we aren't going
                 * to get any new ordered operations.  We can safely run
                 * it here and know for sure that nothing new will be
                 * added to the list
                 */
                btrfs_run_ordered_operations(root, 1);

                smp_mb();
                if (cur_trans->num_writers > 1 || should_grow)
                        schedule_timeout(timeout);

                mutex_lock(&root->fs_info->trans_mutex);
                finish_wait(&cur_trans->writer_wait, &wait);
        } while (cur_trans->num_writers > 1 ||
                 (should_grow && cur_trans->num_joined != joined));

        ret = create_pending_snapshots(trans, root->fs_info);
        BUG_ON(ret);

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        BUG_ON(ret);

        WARN_ON(cur_trans != trans->transaction);

        /* commit_fs_roots and commit_cowonly_roots below are responsible
         * for getting the various roots consistent with each other.  Every
         * pointer in the tree of tree roots has to point to the most up to
         * date root for every subvolume and other tree.  So, we have to
         * keep the tree logging code from jumping in and changing any
         * of the trees.
         *
         * At this point in the commit, there can't be any tree-log
         * writers, but a little lower down we drop the trans mutex
         * and let new people in.  By holding the tree_log_mutex
         * from now until after the super is written, we avoid races
         * with the tree-log code.
         */
        mutex_lock(&root->fs_info->tree_log_mutex);

        ret = commit_fs_roots(trans, root);
        BUG_ON(ret);

        /* commit_fs_roots gets rid of all the tree log roots, it is now
         * safe to free the root of tree log roots
         */
        btrfs_free_log_root_tree(trans, root->fs_info);

        ret = commit_cowonly_roots(trans, root);
        BUG_ON(ret);

        cur_trans = root->fs_info->running_transaction;
        spin_lock(&root->fs_info->new_trans_lock);
        root->fs_info->running_transaction = NULL;
        spin_unlock(&root->fs_info->new_trans_lock);

        btrfs_set_root_node(&root->fs_info->tree_root->root_item,
                            root->fs_info->tree_root->node);
        free_extent_buffer(root->fs_info->tree_root->commit_root);
        root->fs_info->tree_root->commit_root =
                                btrfs_root_node(root->fs_info->tree_root);

        btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
                            root->fs_info->chunk_root->node);
        free_extent_buffer(root->fs_info->chunk_root->commit_root);
        root->fs_info->chunk_root->commit_root =
                                btrfs_root_node(root->fs_info->chunk_root);

        update_super_roots(root);

        if (!root->fs_info->log_root_recovering) {
                btrfs_set_super_log_root(&root->fs_info->super_copy, 0);
                btrfs_set_super_log_root_level(&root->fs_info->super_copy, 0);
        }

        memcpy(&root->fs_info->super_for_commit, &root->fs_info->super_copy,
               sizeof(root->fs_info->super_copy));

        btrfs_copy_pinned(root, pinned_copy);

        trans->transaction->blocked = 0;

        wake_up(&root->fs_info->transaction_wait);

        mutex_unlock(&root->fs_info->trans_mutex);
        ret = btrfs_write_and_wait_transaction(trans, root);
        BUG_ON(ret);
        write_ctree_super(trans, root, 0);

        /*
         * the super is written, we can safely allow the tree-loggers
         * to go about their business
         */
        mutex_unlock(&root->fs_info->tree_log_mutex);

        btrfs_finish_extent_commit(trans, root, pinned_copy);
        kfree(pinned_copy);

        /* do the directory inserts of any pending snapshot creations */
        finish_pending_snapshots(trans, root->fs_info);

        mutex_lock(&root->fs_info->trans_mutex);

        cur_trans->commit_done = 1;
        root->fs_info->last_trans_committed = cur_trans->transid;
        wake_up(&cur_trans->commit_wait);

        put_transaction(cur_trans);
        put_transaction(cur_trans);

        mutex_unlock(&root->fs_info->trans_mutex);

        kmem_cache_free(btrfs_trans_handle_cachep, trans);
        return ret;
}

/*
 * interface function to delete all the snapshots we have scheduled for
 * deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
        LIST_HEAD(list);
        struct btrfs_fs_info *fs_info = root->fs_info;

        mutex_lock(&fs_info->trans_mutex);
        list_splice_init(&fs_info->dead_roots, &list);
        mutex_unlock(&fs_info->trans_mutex);

        while (!list_empty(&list)) {
                root = list_entry(list.next, struct btrfs_root, root_list);
                list_del_init(&root->root_list);
                btrfs_drop_dead_root(root);
        }
        return 0;
}