transaction.c

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"

#define BTRFS_ROOT_TRANS_TAG 0

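/*
 * drop a reference on a transaction.  Once the last reference is gone the
 * transaction is removed from the fs_info list and freed.
 */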
static noinline void put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(transaction->use_count == 0);
	transaction->use_count--;
	if (transaction->use_count == 0) {
		list_del_init(&transaction->list);
		memset(transaction, 0, sizeof(*transaction));
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}

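/*
 * point root->commit_root at the current root node, dropping our reference
 * on the old commit root.
 */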
static noinline void switch_commit_root(struct btrfs_root *root)
{
	free_extent_buffer(root->commit_root);
	root->commit_root = btrfs_root_node(root);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	cur_trans = root->fs_info->running_transaction;
	if (!cur_trans) {
		cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
					     GFP_NOFS);
		BUG_ON(!cur_trans);
		root->fs_info->generation++;
		cur_trans->num_writers = 1;
		cur_trans->num_joined = 0;
		cur_trans->transid = root->fs_info->generation;
		init_waitqueue_head(&cur_trans->writer_wait);
		init_waitqueue_head(&cur_trans->commit_wait);
		cur_trans->in_commit = 0;
		cur_trans->blocked = 0;
		cur_trans->use_count = 1;
		cur_trans->commit_done = 0;
		cur_trans->start_time = get_seconds();

		cur_trans->delayed_refs.root = RB_ROOT;
		cur_trans->delayed_refs.num_entries = 0;
		cur_trans->delayed_refs.num_heads_ready = 0;
		cur_trans->delayed_refs.num_heads = 0;
		cur_trans->delayed_refs.flushing = 0;
		cur_trans->delayed_refs.run_delayed_start = 0;
		spin_lock_init(&cur_trans->delayed_refs.lock);

		INIT_LIST_HEAD(&cur_trans->pending_snapshots);
		list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
		extent_io_tree_init(&cur_trans->dirty_pages,
				    root->fs_info->btree_inode->i_mapping,
				    GFP_NOFS);
		spin_lock(&root->fs_info->new_trans_lock);
		root->fs_info->running_transaction = cur_trans;
		spin_unlock(&root->fs_info->new_trans_lock);
	} else {
		cur_trans->num_writers++;
		cur_trans->num_joined++;
	}

	return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static noinline int record_root_in_trans(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	if (root->ref_cows && root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->commit_root != root->node);

		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		root->last_trans = trans->transid;
		btrfs_init_reloc_root(trans, root);
	}
	return 0;
}

int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!root->ref_cows)
		return 0;

	mutex_lock(&root->fs_info->trans_mutex);
	if (root->last_trans == trans->transid) {
		mutex_unlock(&root->fs_info->trans_mutex);
		return 0;
	}

	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->trans_mutex);
	return 0;
}

/* wait for commit against the current transaction to become unblocked.
 * When this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && cur_trans->blocked) {
		DEFINE_WAIT(wait);
		cur_trans->use_count++;
		while (1) {
			prepare_to_wait(&root->fs_info->transaction_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (!cur_trans->blocked)
				break;
			mutex_unlock(&root->fs_info->trans_mutex);
			schedule();
			mutex_lock(&root->fs_info->trans_mutex);
		}
		finish_wait(&root->fs_info->transaction_wait, &wait);
		put_transaction(cur_trans);
	}
}

enum btrfs_trans_type {
	TRANS_START,
	TRANS_JOIN,
	TRANS_USERSPACE,
	TRANS_JOIN_NOLOCK,
};

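/*
 * decide whether this transaction starter should wait for a blocked commit
 * before joining: TRANS_START waits unless a userspace ioctl transaction is
 * open, TRANS_USERSPACE always waits, and nobody waits during log replay.
 */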
static int may_wait_transaction(struct btrfs_root *root, int type)
{
	if (!root->fs_info->log_root_recovering &&
	    ((type == TRANS_START && !root->fs_info->open_ioctl_trans) ||
	     type == TRANS_USERSPACE))
		return 1;
	return 0;
}

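/*
 * common helper behind the btrfs_*_transaction variants: join or create the
 * running transaction, optionally reserve metadata space for num_items items,
 * and hand back a transaction handle.
 */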
static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
						    u64 num_items, int type)
{
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	int ret;
again:
	h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h)
		return ERR_PTR(-ENOMEM);

	if (type != TRANS_JOIN_NOLOCK)
		mutex_lock(&root->fs_info->trans_mutex);
	if (may_wait_transaction(root, type))
		wait_current_trans(root);

	ret = join_transaction(root);
	BUG_ON(ret);

	cur_trans = root->fs_info->running_transaction;
	cur_trans->use_count++;
	if (type != TRANS_JOIN_NOLOCK)
		mutex_unlock(&root->fs_info->trans_mutex);

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->blocks_used = 0;
	h->block_group = 0;
	h->bytes_reserved = 0;
	h->delayed_ref_updates = 0;
	h->block_rsv = NULL;

	smp_mb();
	if (cur_trans->blocked && may_wait_transaction(root, type)) {
		btrfs_commit_transaction(h, root);
		goto again;
	}

	if (num_items > 0) {
		ret = btrfs_trans_reserve_metadata(h, root, num_items);
		if (ret == -EAGAIN) {
			btrfs_commit_transaction(h, root);
			goto again;
		}
		if (ret < 0) {
			btrfs_end_transaction(h, root);
			return ERR_PTR(ret);
		}
	}

	if (type != TRANS_JOIN_NOLOCK)
		mutex_lock(&root->fs_info->trans_mutex);
	record_root_in_trans(h, root);
	if (type != TRANS_JOIN_NOLOCK)
		mutex_unlock(&root->fs_info->trans_mutex);

	if (!current->journal_info && type != TRANS_USERSPACE)
		current->journal_info = h;
	return h;
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_items)
{
	return start_transaction(root, num_items, TRANS_START);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
						  int num_blocks)
{
	return start_transaction(root, 0, TRANS_JOIN);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root,
							  int num_blocks)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
							  int num_blocks)
{
	return start_transaction(r, 0, TRANS_USERSPACE);
}

/* wait for a transaction commit to be fully complete */
static noinline int wait_for_commit(struct btrfs_root *root,
				    struct btrfs_transaction *commit)
{
	DEFINE_WAIT(wait);
	mutex_lock(&root->fs_info->trans_mutex);
	while (!commit->commit_done) {
		prepare_to_wait(&commit->commit_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (commit->commit_done)
			break;
		mutex_unlock(&root->fs_info->trans_mutex);
		schedule();
		mutex_lock(&root->fs_info->trans_mutex);
	}
	mutex_unlock(&root->fs_info->trans_mutex);
	finish_wait(&commit->commit_wait, &wait);
	return 0;
}

int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret;

	mutex_lock(&root->fs_info->trans_mutex);

	ret = 0;
	if (transid) {
		if (transid <= root->fs_info->last_trans_committed)
			goto out_unlock;

		/* find specified transaction */
		list_for_each_entry(t, &root->fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				break;
			}
			if (t->transid > transid)
				break;
		}
		ret = -EINVAL;
		if (!cur_trans)
			goto out_unlock;  /* bad transid */
	} else {
		/* find newest transaction that is committing | committed */
		list_for_each_entry_reverse(t, &root->fs_info->trans_list,
					    list) {
			if (t->in_commit) {
				if (t->commit_done)
					goto out_unlock;
				cur_trans = t;
				break;
			}
		}
		if (!cur_trans)
			goto out_unlock;  /* nothing committing|committed */
	}

	cur_trans->use_count++;
	mutex_unlock(&root->fs_info->trans_mutex);

	wait_for_commit(root, cur_trans);

	mutex_lock(&root->fs_info->trans_mutex);
	put_transaction(cur_trans);
	ret = 0;
out_unlock:
	mutex_unlock(&root->fs_info->trans_mutex);
	return ret;
}

#if 0
/*
 * rate limit against the drop_snapshot code.  This helps to slow down new
 * operations if the drop_snapshot code isn't able to keep up.
 */
static void throttle_on_drops(struct btrfs_root *root)
{
	struct btrfs_fs_info *info = root->fs_info;
	int harder_count = 0;

harder:
	if (atomic_read(&info->throttles)) {
		DEFINE_WAIT(wait);
		int thr;
		thr = atomic_read(&info->throttle_gen);

		do {
			prepare_to_wait(&info->transaction_throttle,
					&wait, TASK_UNINTERRUPTIBLE);
			if (!atomic_read(&info->throttles)) {
				finish_wait(&info->transaction_throttle, &wait);
				break;
			}
			schedule();
			finish_wait(&info->transaction_throttle, &wait);
		} while (thr == atomic_read(&info->throttle_gen));
		harder_count++;

		if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 &&
		    harder_count < 2)
			goto harder;

		if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 &&
		    harder_count < 10)
			goto harder;

		if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 &&
		    harder_count < 20)
			goto harder;
	}
}
#endif

void btrfs_throttle(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->trans_mutex);
	if (!root->fs_info->open_ioctl_trans)
		wait_current_trans(root);
	mutex_unlock(&root->fs_info->trans_mutex);
}

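/*
 * returns 1 when btrfs_block_rsv_check thinks the global block reserve is
 * running low enough that the current transaction should be ended soon.
 */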
static int should_end_transaction(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	int ret;
	ret = btrfs_block_rsv_check(trans, root,
				    &root->fs_info->global_block_rsv, 0, 5);
	return ret ? 1 : 0;
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	int updates;

	if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
		return 1;

	updates = trans->delayed_ref_updates;
	trans->delayed_ref_updates = 0;
	if (updates)
		btrfs_run_delayed_refs(trans, root, updates);

	return should_end_transaction(trans, root);
}

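/*
 * common helper behind btrfs_end_transaction and friends: run a batch of
 * delayed refs, release reserved metadata, drop our writer count and free
 * the handle.  With throttle set, a blocked transaction is committed here
 * instead of just waking the transaction kthread.
 */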
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, int throttle, int lock)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *info = root->fs_info;
	int count = 0;

	while (count < 4) {
		unsigned long cur = trans->delayed_ref_updates;
		trans->delayed_ref_updates = 0;
		if (cur &&
		    trans->transaction->delayed_refs.num_heads_ready > 64) {
			trans->delayed_ref_updates = 0;

			/*
			 * do a full flush if the transaction is trying
			 * to close
			 */
			if (trans->transaction->delayed_refs.flushing)
				cur = 0;
			btrfs_run_delayed_refs(trans, root, cur);
		} else {
			break;
		}
		count++;
	}

	btrfs_trans_release_metadata(trans, root);

	if (lock && !root->fs_info->open_ioctl_trans &&
	    should_end_transaction(trans, root))
		trans->transaction->blocked = 1;

	if (lock && cur_trans->blocked && !cur_trans->in_commit) {
		if (throttle)
			return btrfs_commit_transaction(trans, root);
		else
			wake_up_process(info->transaction_kthread);
	}

	if (lock)
		mutex_lock(&info->trans_mutex);
	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(cur_trans->num_writers < 1);
	cur_trans->num_writers--;

	smp_mb();
	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	put_transaction(cur_trans);
	if (lock)
		mutex_unlock(&info->trans_mutex);

	if (current->journal_info == trans)
		current->journal_info = NULL;
	memset(trans, 0, sizeof(*trans));
	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (throttle)
		btrfs_run_delayed_iputs(root);

	return 0;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 0, 1);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1, 1);
}

int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 0, 0);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int err = 0;
	int werr = 0;
	struct page *page;
	struct inode *btree_inode = root->fs_info->btree_inode;
	u64 start = 0;
	u64 end;
	unsigned long index;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark);
		if (ret)
			break;
		while (start <= end) {
			cond_resched();

			index = start >> PAGE_CACHE_SHIFT;
			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
			page = find_get_page(btree_inode->i_mapping, index);
			if (!page)
				continue;

			btree_lock_page_hook(page);
			if (!page->mapping) {
				unlock_page(page);
				page_cache_release(page);
				continue;
			}

			if (PageWriteback(page)) {
				if (PageDirty(page))
					wait_on_page_writeback(page);
				else {
					unlock_page(page);
					page_cache_release(page);
					continue;
				}
			}
			err = write_one_page(page, 0);
			if (err)
				werr = err;
			page_cache_release(page);
		}
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
			      struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int err = 0;
	int werr = 0;
	struct page *page;
	struct inode *btree_inode = root->fs_info->btree_inode;
	u64 start = 0;
	u64 end;
	unsigned long index;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark);
		if (ret)
			break;

		clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
		while (start <= end) {
			index = start >> PAGE_CACHE_SHIFT;
			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
			page = find_get_page(btree_inode->i_mapping, index);
			if (!page)
				continue;
			if (PageDirty(page)) {
				btree_lock_page_hook(page);
				wait_on_page_writeback(page);
				err = write_one_page(page, 0);
				if (err)
					werr = err;
			}
			wait_on_page_writeback(page);
			page_cache_release(page);
			cond_resched();
		}
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
					struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int ret2;

	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
	return ret || ret2;
}

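/*
 * write out and wait on everything dirtied by this transaction.  Without a
 * transaction handle we fall back to flushing the whole btree inode.
 */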
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	if (!trans || !trans->transaction) {
		struct inode *btree_inode;
		btree_inode = root->fs_info->btree_inode;
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	return btrfs_write_and_wait_marked_extents(root,
					   &trans->transaction->dirty_pages,
					   EXTENT_DIRTY);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);
	btrfs_write_dirty_block_groups(trans, root);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		BUG_ON(ret);

		old_root_used = btrfs_root_used(&root->root_item);
		ret = btrfs_write_dirty_block_groups(trans, root);
		BUG_ON(ret);
	}

	if (root != root->fs_info->extent_root)
		switch_commit_root(root);

	return 0;
}

/*
 * update all the cowonly tree roots on disk
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	eb = btrfs_lock_root_node(fs_info->tree_root);
	btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);

		update_cowonly_root(trans, root);
	}

	down_write(&fs_info->extent_commit_sem);
	switch_commit_root(fs_info->extent_root);
	up_write(&fs_info->extent_commit_sem);

	return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This adds the
 * given root to the list of dead roots that still need to be dropped.
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->trans_mutex);
	list_add(&root->root_list, &root->fs_info->dead_roots);
	mutex_unlock(&root->fs_info->trans_mutex);
	return 0;
}

/*
 * update all the fs tree (subvolume) roots that were changed in this
 * transaction: write their root items into the tree of tree roots and
 * clear their tag in the radix tree.
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);
			btrfs_orphan_commit_root(trans, root);

			if (root->commit_root != root->node) {
				switch_commit_root(root);
				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			if (err)
				break;
		}
	}
	return err;
}

/*
 * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;
	unsigned long nr;

	if (xchg(&root->defrag_running, 1))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root, cacheonly);

		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root, nr);
		cond_resched();

		if (root->fs_info->closing || ret != -EAGAIN)
			break;
	}
	root->defrag_running = 0;
	return ret;
}

#if 0
/*
 * when dropping snapshots, we generate a ton of delayed refs, and it makes
 * sense not to join the transaction while it is trying to flush the current
 * queue of delayed refs out.
 *
 * This is used by the drop snapshot code only
 */
static noinline int wait_transaction_pre_flush(struct btrfs_fs_info *info)
{
	DEFINE_WAIT(wait);

	mutex_lock(&info->trans_mutex);
	while (info->running_transaction &&
	       info->running_transaction->delayed_refs.flushing) {
		prepare_to_wait(&info->transaction_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		mutex_unlock(&info->trans_mutex);

		schedule();

		mutex_lock(&info->trans_mutex);
		finish_wait(&info->transaction_wait, &wait);
	}
	mutex_unlock(&info->trans_mutex);
	return 0;
}

/*
 * Given a list of roots that need to be deleted, call btrfs_drop_snapshot on
 * all of them
 */
int btrfs_drop_dead_root(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	unsigned long nr;
	int ret;

	while (1) {
		/*
		 * we don't want to jump in and create a bunch of
		 * delayed refs if the transaction is starting to close
		 */
		wait_transaction_pre_flush(tree_root->fs_info);
		trans = btrfs_start_transaction(tree_root, 1);

		/*
		 * we've joined a transaction, make sure it isn't
		 * closing right now
		 */
		if (trans->transaction->delayed_refs.flushing) {
			btrfs_end_transaction(trans, tree_root);
			continue;
		}

		ret = btrfs_drop_snapshot(trans, root);
		if (ret != -EAGAIN)
			break;

		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			break;

		nr = trans->blocks_used;
		ret = btrfs_end_transaction(trans, tree_root);
		BUG_ON(ret);

		btrfs_btree_balance_dirty(tree_root, nr);
		cond_resched();
	}
	BUG_ON(ret);

	ret = btrfs_del_root(trans, tree_root, &root->root_key);
	BUG_ON(ret);

	nr = trans->blocks_used;
	ret = btrfs_end_transaction(trans, tree_root);
	BUG_ON(ret);

	free_extent_buffer(root->node);
	free_extent_buffer(root->commit_root);
	kfree(root);

	btrfs_btree_balance_dirty(tree_root, nr);
	return ret;
}
#endif

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct inode *parent_inode;
	struct dentry *parent;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	int ret;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		pending->error = -ENOMEM;
		goto fail;
	}

	ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid);
	if (ret) {
		pending->error = ret;
		goto fail;
	}

	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
	btrfs_orphan_pre_snapshot(trans, pending, &to_reserve);

	if (to_reserve > 0) {
		ret = btrfs_block_rsv_add(trans, root, &pending->block_rsv,
					  to_reserve);
		if (ret) {
			pending->error = ret;
			goto fail;
		}
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	trans->block_rsv = &pending->block_rsv;

	dentry = pending->dentry;
	parent = dget_parent(dentry);
	parent_inode = parent->d_inode;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(parent_inode, &index);
	BUG_ON(ret);
	ret = btrfs_insert_dir_item(trans, parent_root,
				dentry->d_name.name, dentry->d_name.len,
				parent_inode->i_ino, &key,
				BTRFS_FT_DIR, index);
	BUG_ON(ret);

	btrfs_i_size_write(parent_inode, parent_inode->i_size +
					 dentry->d_name.len * 2);
	ret = btrfs_update_inode(trans, parent_root, parent_inode);
	BUG_ON(ret);

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	old = btrfs_lock_root_node(root);
	btrfs_cow_block(trans, root, old, NULL, 0, &old);
	btrfs_set_lock_blocking(old);

	btrfs_copy_root(trans, root, old, &tmp, objectid);
	btrfs_tree_unlock(old);
	free_extent_buffer(old);

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	BUG_ON(ret);

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, tree_root, objectid,
				 parent_root->root_key.objectid,
				 parent_inode->i_ino, index,
				 dentry->d_name.name, dentry->d_name.len);
	BUG_ON(ret);
	dput(parent);

	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
	BUG_ON(IS_ERR(pending->snap));

	btrfs_reloc_post_snapshot(trans, pending);
	btrfs_orphan_post_snapshot(trans, pending);
fail:
	kfree(new_root_item);
	btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
	return 0;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret;

	list_for_each_entry(pending, head, list) {
		ret = create_pending_snapshot(trans, fs_info, pending);
		BUG_ON(ret);
	}
	return 0;
}

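/*
 * copy the freshly committed root pointers for the chunk root and the tree
 * root into the in-memory super block, so the next super block write points
 * at the new trees.
 */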
static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = &root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (super->cache_generation != 0 || btrfs_test_opt(root, SPACE_CACHE))
		super->cache_generation = root_item->generation;
}

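/* report whether the running transaction is in the middle of a commit */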
int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->new_trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->in_commit;
	spin_unlock(&info->new_trans_lock);
	return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->new_trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->blocked;
	spin_unlock(&info->new_trans_lock);
	return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
					    struct btrfs_transaction *trans)
{
	DEFINE_WAIT(wait);

	if (trans->in_commit)
		return;

	while (1) {
		prepare_to_wait(&root->fs_info->transaction_blocked_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (trans->in_commit) {
			finish_wait(&root->fs_info->transaction_blocked_wait,
				    &wait);
			break;
		}
		mutex_unlock(&root->fs_info->trans_mutex);
		schedule();
		mutex_lock(&root->fs_info->trans_mutex);
		finish_wait(&root->fs_info->transaction_blocked_wait, &wait);
	}
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
					 struct btrfs_transaction *trans)
{
	DEFINE_WAIT(wait);

	if (trans->commit_done || (trans->in_commit && !trans->blocked))
		return;

	while (1) {
		prepare_to_wait(&root->fs_info->transaction_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (trans->commit_done ||
		    (trans->in_commit && !trans->blocked)) {
			finish_wait(&root->fs_info->transaction_wait,
				    &wait);
			break;
		}
		mutex_unlock(&root->fs_info->trans_mutex);
		schedule();
		mutex_lock(&root->fs_info->trans_mutex);
		finish_wait(&root->fs_info->transaction_wait,
			    &wait);
	}
}

/*
 * commit transactions asynchronously.  once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct btrfs_root *root;
	struct delayed_work work;
};

static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work.work);

	btrfs_commit_transaction(ac->newtrans, ac->root);
	kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   int wait_for_unblock)
{
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	BUG_ON(!ac);

	INIT_DELAYED_WORK(&ac->work, do_async_commit);
	ac->root = root;
	ac->newtrans = btrfs_join_transaction(root, 0);

	/* take transaction reference */
	mutex_lock(&root->fs_info->trans_mutex);
	cur_trans = trans->transaction;
	cur_trans->use_count++;
	mutex_unlock(&root->fs_info->trans_mutex);

	btrfs_end_transaction(trans, root);
	schedule_delayed_work(&ac->work, 0);

	/* wait for transaction to start and unblock */
	mutex_lock(&root->fs_info->trans_mutex);
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(root, cur_trans);
	else
		wait_current_trans_commit_start(root, cur_trans);
	put_transaction(cur_trans);
	mutex_unlock(&root->fs_info->trans_mutex);

	return 0;
}

/*
 * btrfs_transaction state sequence:
 *    in_commit = 0, blocked = 0  (initial)
 *    in_commit = 1, blocked = 1
 *    blocked = 0
 *    commit_done = 1
 */
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	unsigned long joined = 0;
	struct btrfs_transaction *cur_trans;
	struct btrfs_transaction *prev_trans = NULL;
	DEFINE_WAIT(wait);
	int ret;
	int should_grow = 0;
	unsigned long now = get_seconds();
	int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);

	btrfs_run_ordered_operations(root, 0);

	/* make a pass through all the delayed refs we have so far
	 * any running procs may add more while we are here
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	BUG_ON(ret);

	btrfs_trans_release_metadata(trans, root);

	cur_trans = trans->transaction;
	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;

	ret = btrfs_run_delayed_refs(trans, root, 0);
	BUG_ON(ret);

	mutex_lock(&root->fs_info->trans_mutex);
	if (cur_trans->in_commit) {
		cur_trans->use_count++;
		mutex_unlock(&root->fs_info->trans_mutex);
		btrfs_end_transaction(trans, root);

		ret = wait_for_commit(root, cur_trans);
		BUG_ON(ret);

		mutex_lock(&root->fs_info->trans_mutex);
		put_transaction(cur_trans);
		mutex_unlock(&root->fs_info->trans_mutex);

		return 0;
	}

	trans->transaction->in_commit = 1;
	trans->transaction->blocked = 1;
	wake_up(&root->fs_info->transaction_blocked_wait);

	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (!prev_trans->commit_done) {
			prev_trans->use_count++;
			mutex_unlock(&root->fs_info->trans_mutex);

			wait_for_commit(root, prev_trans);

			mutex_lock(&root->fs_info->trans_mutex);
			put_transaction(prev_trans);
		}
	}

	if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
		should_grow = 1;

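	/*
	 * give everyone else holding a transaction handle a chance to finish:
	 * flush delalloc and ordered operations as needed, then sleep until
	 * we are the only writer left and, while the commit is still young,
	 * until no new writers joined during the last pass.
	 */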
	do {
		int snap_pending = 0;

		joined = cur_trans->num_joined;
		if (!list_empty(&trans->transaction->pending_snapshots))
			snap_pending = 1;

		WARN_ON(cur_trans != trans->transaction);
		mutex_unlock(&root->fs_info->trans_mutex);

		if (flush_on_commit || snap_pending) {
			btrfs_start_delalloc_inodes(root, 1);
			ret = btrfs_wait_ordered_extents(root, 0, 1);
			BUG_ON(ret);
		}

		/*
		 * rename doesn't use btrfs_join_transaction, so, once we
		 * set the transaction to blocked above, we aren't going
		 * to get any new ordered operations.  We can safely run
		 * it here and know for sure that nothing new will be added
		 * to the list
		 */
		btrfs_run_ordered_operations(root, 1);

		prepare_to_wait(&cur_trans->writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);

		smp_mb();
		if (cur_trans->num_writers > 1)
			schedule_timeout(MAX_SCHEDULE_TIMEOUT);
		else if (should_grow)
			schedule_timeout(1);

		mutex_lock(&root->fs_info->trans_mutex);
		finish_wait(&cur_trans->writer_wait, &wait);
	} while (cur_trans->num_writers > 1 ||
		 (should_grow && cur_trans->num_joined != joined));

	ret = create_pending_snapshots(trans, root->fs_info);
	BUG_ON(ret);

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	WARN_ON(cur_trans != trans->transaction);

	/* commit_fs_roots() and commit_cowonly_roots() below are responsible
	 * for getting the various roots consistent with each other.  Every
	 * pointer in the tree of tree roots has to point to the most up to
	 * date root for every subvolume and other tree.  So, we have to keep
	 * the tree logging code from jumping in and changing any
	 * of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in.  By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, root);
	BUG_ON(ret);

	/* commit_fs_roots gets rid of all the tree log roots, it is now
	 * safe to free the root of tree log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	ret = commit_cowonly_roots(trans, root);
	BUG_ON(ret);

	btrfs_prepare_extent_commit(trans, root);

	cur_trans = root->fs_info->running_transaction;
	spin_lock(&root->fs_info->new_trans_lock);
	root->fs_info->running_transaction = NULL;
	spin_unlock(&root->fs_info->new_trans_lock);

	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	switch_commit_root(root->fs_info->tree_root);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	switch_commit_root(root->fs_info->chunk_root);

	update_super_roots(root);

	if (!root->fs_info->log_root_recovering) {
		btrfs_set_super_log_root(&root->fs_info->super_copy, 0);
		btrfs_set_super_log_root_level(&root->fs_info->super_copy, 0);
	}

	memcpy(&root->fs_info->super_for_commit, &root->fs_info->super_copy,
	       sizeof(root->fs_info->super_copy));

	trans->transaction->blocked = 0;

	wake_up(&root->fs_info->transaction_wait);

	mutex_unlock(&root->fs_info->trans_mutex);
	ret = btrfs_write_and_wait_transaction(trans, root);
	BUG_ON(ret);
	write_ctree_super(trans, root, 0);

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root);

	mutex_lock(&root->fs_info->trans_mutex);

	cur_trans->commit_done = 1;

	root->fs_info->last_trans_committed = cur_trans->transid;

	wake_up(&cur_trans->commit_wait);

	put_transaction(cur_trans);
	put_transaction(cur_trans);

	mutex_unlock(&root->fs_info->trans_mutex);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (current != root->fs_info->transaction_kthread)
		btrfs_run_delayed_iputs(root);

	return ret;
}

/*
 * interface function to delete all the snapshots we have scheduled for deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->trans_mutex);
	list_splice_init(&fs_info->dead_roots, &list);
	mutex_unlock(&fs_info->trans_mutex);

	while (!list_empty(&list)) {
		root = list_entry(list.next, struct btrfs_root, root_list);
		list_del(&root->root_list);

		if (btrfs_header_backref_rev(root->node) <
		    BTRFS_MIXED_BACKREF_REV)
			btrfs_drop_snapshot(root, NULL, 0);
		else
			btrfs_drop_snapshot(root, NULL, 1);
	}
	return 0;
}