/* transaction.c — pagination/line-number residue from web extraction removed */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include "ctree.h"
#include "disk-io.h"
#include "locking.h"
#include "ref-cache.h"
#include "transaction.h"
  27. static int total_trans = 0;
  28. extern struct kmem_cache *btrfs_trans_handle_cachep;
  29. extern struct kmem_cache *btrfs_transaction_cachep;
  30. #define BTRFS_ROOT_TRANS_TAG 0
  31. struct dirty_root {
  32. struct list_head list;
  33. struct btrfs_root *root;
  34. struct btrfs_root *latest_root;
  35. };
  36. static noinline void put_transaction(struct btrfs_transaction *transaction)
  37. {
  38. WARN_ON(transaction->use_count == 0);
  39. transaction->use_count--;
  40. if (transaction->use_count == 0) {
  41. WARN_ON(total_trans == 0);
  42. total_trans--;
  43. list_del_init(&transaction->list);
  44. memset(transaction, 0, sizeof(*transaction));
  45. kmem_cache_free(btrfs_transaction_cachep, transaction);
  46. }
  47. }
  48. static noinline int join_transaction(struct btrfs_root *root)
  49. {
  50. struct btrfs_transaction *cur_trans;
  51. cur_trans = root->fs_info->running_transaction;
  52. if (!cur_trans) {
  53. cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
  54. GFP_NOFS);
  55. total_trans++;
  56. BUG_ON(!cur_trans);
  57. root->fs_info->generation++;
  58. root->fs_info->last_alloc = 0;
  59. root->fs_info->last_data_alloc = 0;
  60. cur_trans->num_writers = 1;
  61. cur_trans->num_joined = 0;
  62. cur_trans->transid = root->fs_info->generation;
  63. init_waitqueue_head(&cur_trans->writer_wait);
  64. init_waitqueue_head(&cur_trans->commit_wait);
  65. cur_trans->in_commit = 0;
  66. cur_trans->blocked = 0;
  67. cur_trans->use_count = 1;
  68. cur_trans->commit_done = 0;
  69. cur_trans->start_time = get_seconds();
  70. INIT_LIST_HEAD(&cur_trans->pending_snapshots);
  71. list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
  72. extent_io_tree_init(&cur_trans->dirty_pages,
  73. root->fs_info->btree_inode->i_mapping,
  74. GFP_NOFS);
  75. spin_lock(&root->fs_info->new_trans_lock);
  76. root->fs_info->running_transaction = cur_trans;
  77. spin_unlock(&root->fs_info->new_trans_lock);
  78. } else {
  79. cur_trans->num_writers++;
  80. cur_trans->num_joined++;
  81. }
  82. return 0;
  83. }
  84. static noinline int record_root_in_trans(struct btrfs_root *root)
  85. {
  86. struct dirty_root *dirty;
  87. u64 running_trans_id = root->fs_info->running_transaction->transid;
  88. if (root->ref_cows && root->last_trans < running_trans_id) {
  89. WARN_ON(root == root->fs_info->extent_root);
  90. if (root->root_item.refs != 0) {
  91. radix_tree_tag_set(&root->fs_info->fs_roots_radix,
  92. (unsigned long)root->root_key.objectid,
  93. BTRFS_ROOT_TRANS_TAG);
  94. dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
  95. BUG_ON(!dirty);
  96. dirty->root = kmalloc(sizeof(*dirty->root), GFP_NOFS);
  97. BUG_ON(!dirty->root);
  98. dirty->latest_root = root;
  99. INIT_LIST_HEAD(&dirty->list);
  100. root->commit_root = btrfs_root_node(root);
  101. root->dirty_root = dirty;
  102. memcpy(dirty->root, root, sizeof(*root));
  103. dirty->root->ref_tree = &root->ref_tree_struct;
  104. spin_lock_init(&dirty->root->node_lock);
  105. mutex_init(&dirty->root->objectid_mutex);
  106. dirty->root->node = root->commit_root;
  107. dirty->root->commit_root = NULL;
  108. } else {
  109. WARN_ON(1);
  110. }
  111. root->last_trans = running_trans_id;
  112. }
  113. return 0;
  114. }
  115. struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
  116. int num_blocks, int join)
  117. {
  118. struct btrfs_trans_handle *h =
  119. kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
  120. struct btrfs_transaction *cur_trans;
  121. int ret;
  122. mutex_lock(&root->fs_info->trans_mutex);
  123. cur_trans = root->fs_info->running_transaction;
  124. if (cur_trans && cur_trans->blocked && !join) {
  125. DEFINE_WAIT(wait);
  126. cur_trans->use_count++;
  127. while(1) {
  128. prepare_to_wait(&root->fs_info->transaction_wait, &wait,
  129. TASK_UNINTERRUPTIBLE);
  130. if (cur_trans->blocked) {
  131. mutex_unlock(&root->fs_info->trans_mutex);
  132. schedule();
  133. mutex_lock(&root->fs_info->trans_mutex);
  134. finish_wait(&root->fs_info->transaction_wait,
  135. &wait);
  136. } else {
  137. finish_wait(&root->fs_info->transaction_wait,
  138. &wait);
  139. break;
  140. }
  141. }
  142. put_transaction(cur_trans);
  143. }
  144. ret = join_transaction(root);
  145. BUG_ON(ret);
  146. record_root_in_trans(root);
  147. h->transid = root->fs_info->running_transaction->transid;
  148. h->transaction = root->fs_info->running_transaction;
  149. h->blocks_reserved = num_blocks;
  150. h->blocks_used = 0;
  151. h->block_group = NULL;
  152. h->alloc_exclude_nr = 0;
  153. h->alloc_exclude_start = 0;
  154. root->fs_info->running_transaction->use_count++;
  155. mutex_unlock(&root->fs_info->trans_mutex);
  156. return h;
  157. }
  158. struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
  159. int num_blocks)
  160. {
  161. return start_transaction(root, num_blocks, 0);
  162. }
  163. struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
  164. int num_blocks)
  165. {
  166. return start_transaction(root, num_blocks, 1);
  167. }
  168. static noinline int wait_for_commit(struct btrfs_root *root,
  169. struct btrfs_transaction *commit)
  170. {
  171. DEFINE_WAIT(wait);
  172. mutex_lock(&root->fs_info->trans_mutex);
  173. while(!commit->commit_done) {
  174. prepare_to_wait(&commit->commit_wait, &wait,
  175. TASK_UNINTERRUPTIBLE);
  176. if (commit->commit_done)
  177. break;
  178. mutex_unlock(&root->fs_info->trans_mutex);
  179. schedule();
  180. mutex_lock(&root->fs_info->trans_mutex);
  181. }
  182. mutex_unlock(&root->fs_info->trans_mutex);
  183. finish_wait(&commit->commit_wait, &wait);
  184. return 0;
  185. }
  186. static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
  187. struct btrfs_root *root, int throttle)
  188. {
  189. struct btrfs_transaction *cur_trans;
  190. mutex_lock(&root->fs_info->trans_mutex);
  191. cur_trans = root->fs_info->running_transaction;
  192. WARN_ON(cur_trans != trans->transaction);
  193. WARN_ON(cur_trans->num_writers < 1);
  194. cur_trans->num_writers--;
  195. if (waitqueue_active(&cur_trans->writer_wait))
  196. wake_up(&cur_trans->writer_wait);
  197. if (throttle && atomic_read(&root->fs_info->throttles)) {
  198. DEFINE_WAIT(wait);
  199. mutex_unlock(&root->fs_info->trans_mutex);
  200. prepare_to_wait(&root->fs_info->transaction_throttle, &wait,
  201. TASK_UNINTERRUPTIBLE);
  202. if (atomic_read(&root->fs_info->throttles))
  203. schedule();
  204. finish_wait(&root->fs_info->transaction_throttle, &wait);
  205. mutex_lock(&root->fs_info->trans_mutex);
  206. }
  207. put_transaction(cur_trans);
  208. mutex_unlock(&root->fs_info->trans_mutex);
  209. memset(trans, 0, sizeof(*trans));
  210. kmem_cache_free(btrfs_trans_handle_cachep, trans);
  211. return 0;
  212. }
  213. int btrfs_end_transaction(struct btrfs_trans_handle *trans,
  214. struct btrfs_root *root)
  215. {
  216. return __btrfs_end_transaction(trans, root, 0);
  217. }
  218. int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
  219. struct btrfs_root *root)
  220. {
  221. return __btrfs_end_transaction(trans, root, 1);
  222. }
  223. int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
  224. struct btrfs_root *root)
  225. {
  226. int ret;
  227. int err;
  228. int werr = 0;
  229. struct extent_io_tree *dirty_pages;
  230. struct page *page;
  231. struct inode *btree_inode = root->fs_info->btree_inode;
  232. u64 start;
  233. u64 end;
  234. unsigned long index;
  235. if (!trans || !trans->transaction) {
  236. return filemap_write_and_wait(btree_inode->i_mapping);
  237. }
  238. dirty_pages = &trans->transaction->dirty_pages;
  239. while(1) {
  240. ret = find_first_extent_bit(dirty_pages, 0, &start, &end,
  241. EXTENT_DIRTY);
  242. if (ret)
  243. break;
  244. clear_extent_dirty(dirty_pages, start, end, GFP_NOFS);
  245. while(start <= end) {
  246. index = start >> PAGE_CACHE_SHIFT;
  247. start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
  248. page = find_lock_page(btree_inode->i_mapping, index);
  249. if (!page)
  250. continue;
  251. if (PageWriteback(page)) {
  252. if (PageDirty(page))
  253. wait_on_page_writeback(page);
  254. else {
  255. unlock_page(page);
  256. page_cache_release(page);
  257. continue;
  258. }
  259. }
  260. err = write_one_page(page, 0);
  261. if (err)
  262. werr = err;
  263. page_cache_release(page);
  264. }
  265. }
  266. err = filemap_fdatawait(btree_inode->i_mapping);
  267. if (err)
  268. werr = err;
  269. return werr;
  270. }
  271. static int update_cowonly_root(struct btrfs_trans_handle *trans,
  272. struct btrfs_root *root)
  273. {
  274. int ret;
  275. u64 old_root_bytenr;
  276. struct btrfs_root *tree_root = root->fs_info->tree_root;
  277. btrfs_write_dirty_block_groups(trans, root);
  278. while(1) {
  279. old_root_bytenr = btrfs_root_bytenr(&root->root_item);
  280. if (old_root_bytenr == root->node->start)
  281. break;
  282. btrfs_set_root_bytenr(&root->root_item,
  283. root->node->start);
  284. btrfs_set_root_level(&root->root_item,
  285. btrfs_header_level(root->node));
  286. ret = btrfs_update_root(trans, tree_root,
  287. &root->root_key,
  288. &root->root_item);
  289. BUG_ON(ret);
  290. btrfs_write_dirty_block_groups(trans, root);
  291. }
  292. return 0;
  293. }
  294. int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
  295. struct btrfs_root *root)
  296. {
  297. struct btrfs_fs_info *fs_info = root->fs_info;
  298. struct list_head *next;
  299. while(!list_empty(&fs_info->dirty_cowonly_roots)) {
  300. next = fs_info->dirty_cowonly_roots.next;
  301. list_del_init(next);
  302. root = list_entry(next, struct btrfs_root, dirty_list);
  303. update_cowonly_root(trans, root);
  304. if (root->fs_info->closing)
  305. btrfs_remove_leaf_refs(root);
  306. }
  307. return 0;
  308. }
  309. int btrfs_add_dead_root(struct btrfs_root *root,
  310. struct btrfs_root *latest,
  311. struct list_head *dead_list)
  312. {
  313. struct dirty_root *dirty;
  314. dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
  315. if (!dirty)
  316. return -ENOMEM;
  317. dirty->root = root;
  318. dirty->latest_root = latest;
  319. list_add(&dirty->list, dead_list);
  320. return 0;
  321. }
  322. static noinline int add_dirty_roots(struct btrfs_trans_handle *trans,
  323. struct radix_tree_root *radix,
  324. struct list_head *list)
  325. {
  326. struct dirty_root *dirty;
  327. struct btrfs_root *gang[8];
  328. struct btrfs_root *root;
  329. int i;
  330. int ret;
  331. int err = 0;
  332. u32 refs;
  333. while(1) {
  334. ret = radix_tree_gang_lookup_tag(radix, (void **)gang, 0,
  335. ARRAY_SIZE(gang),
  336. BTRFS_ROOT_TRANS_TAG);
  337. if (ret == 0)
  338. break;
  339. for (i = 0; i < ret; i++) {
  340. root = gang[i];
  341. radix_tree_tag_clear(radix,
  342. (unsigned long)root->root_key.objectid,
  343. BTRFS_ROOT_TRANS_TAG);
  344. BUG_ON(!root->ref_tree);
  345. dirty = root->dirty_root;
  346. if (root->commit_root == root->node) {
  347. WARN_ON(root->node->start !=
  348. btrfs_root_bytenr(&root->root_item));
  349. free_extent_buffer(root->commit_root);
  350. root->commit_root = NULL;
  351. kfree(dirty->root);
  352. kfree(dirty);
  353. /* make sure to update the root on disk
  354. * so we get any updates to the block used
  355. * counts
  356. */
  357. err = btrfs_update_root(trans,
  358. root->fs_info->tree_root,
  359. &root->root_key,
  360. &root->root_item);
  361. continue;
  362. }
  363. memset(&root->root_item.drop_progress, 0,
  364. sizeof(struct btrfs_disk_key));
  365. root->root_item.drop_level = 0;
  366. root->commit_root = NULL;
  367. root->root_key.offset = root->fs_info->generation;
  368. btrfs_set_root_bytenr(&root->root_item,
  369. root->node->start);
  370. btrfs_set_root_level(&root->root_item,
  371. btrfs_header_level(root->node));
  372. err = btrfs_insert_root(trans, root->fs_info->tree_root,
  373. &root->root_key,
  374. &root->root_item);
  375. if (err)
  376. break;
  377. refs = btrfs_root_refs(&dirty->root->root_item);
  378. btrfs_set_root_refs(&dirty->root->root_item, refs - 1);
  379. err = btrfs_update_root(trans, root->fs_info->tree_root,
  380. &dirty->root->root_key,
  381. &dirty->root->root_item);
  382. BUG_ON(err);
  383. if (refs == 1) {
  384. list_add(&dirty->list, list);
  385. } else {
  386. WARN_ON(1);
  387. free_extent_buffer(dirty->root->node);
  388. kfree(dirty->root);
  389. kfree(dirty);
  390. }
  391. }
  392. }
  393. return err;
  394. }
  395. int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
  396. {
  397. struct btrfs_fs_info *info = root->fs_info;
  398. int ret;
  399. struct btrfs_trans_handle *trans;
  400. unsigned long nr;
  401. smp_mb();
  402. if (root->defrag_running)
  403. return 0;
  404. trans = btrfs_start_transaction(root, 1);
  405. while (1) {
  406. root->defrag_running = 1;
  407. ret = btrfs_defrag_leaves(trans, root, cacheonly);
  408. nr = trans->blocks_used;
  409. btrfs_end_transaction(trans, root);
  410. btrfs_btree_balance_dirty(info->tree_root, nr);
  411. cond_resched();
  412. trans = btrfs_start_transaction(root, 1);
  413. if (root->fs_info->closing || ret != -EAGAIN)
  414. break;
  415. }
  416. root->defrag_running = 0;
  417. smp_mb();
  418. btrfs_end_transaction(trans, root);
  419. return 0;
  420. }
  421. static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
  422. struct list_head *list)
  423. {
  424. struct dirty_root *dirty;
  425. struct btrfs_trans_handle *trans;
  426. unsigned long nr;
  427. u64 num_bytes;
  428. u64 bytes_used;
  429. int ret = 0;
  430. int err;
  431. while(!list_empty(list)) {
  432. struct btrfs_root *root;
  433. dirty = list_entry(list->prev, struct dirty_root, list);
  434. list_del_init(&dirty->list);
  435. num_bytes = btrfs_root_used(&dirty->root->root_item);
  436. root = dirty->latest_root;
  437. atomic_inc(&root->fs_info->throttles);
  438. mutex_lock(&root->fs_info->drop_mutex);
  439. while(1) {
  440. trans = btrfs_start_transaction(tree_root, 1);
  441. ret = btrfs_drop_snapshot(trans, dirty->root);
  442. if (ret != -EAGAIN) {
  443. break;
  444. }
  445. err = btrfs_update_root(trans,
  446. tree_root,
  447. &dirty->root->root_key,
  448. &dirty->root->root_item);
  449. if (err)
  450. ret = err;
  451. nr = trans->blocks_used;
  452. ret = btrfs_end_transaction(trans, tree_root);
  453. BUG_ON(ret);
  454. mutex_unlock(&root->fs_info->drop_mutex);
  455. btrfs_btree_balance_dirty(tree_root, nr);
  456. cond_resched();
  457. mutex_lock(&root->fs_info->drop_mutex);
  458. }
  459. BUG_ON(ret);
  460. atomic_dec(&root->fs_info->throttles);
  461. wake_up(&root->fs_info->transaction_throttle);
  462. mutex_lock(&root->fs_info->alloc_mutex);
  463. num_bytes -= btrfs_root_used(&dirty->root->root_item);
  464. bytes_used = btrfs_root_used(&root->root_item);
  465. if (num_bytes) {
  466. record_root_in_trans(root);
  467. btrfs_set_root_used(&root->root_item,
  468. bytes_used - num_bytes);
  469. }
  470. mutex_unlock(&root->fs_info->alloc_mutex);
  471. ret = btrfs_del_root(trans, tree_root, &dirty->root->root_key);
  472. if (ret) {
  473. BUG();
  474. break;
  475. }
  476. mutex_unlock(&root->fs_info->drop_mutex);
  477. nr = trans->blocks_used;
  478. ret = btrfs_end_transaction(trans, tree_root);
  479. BUG_ON(ret);
  480. free_extent_buffer(dirty->root->node);
  481. kfree(dirty->root);
  482. kfree(dirty);
  483. btrfs_btree_balance_dirty(tree_root, nr);
  484. cond_resched();
  485. }
  486. return ret;
  487. }
  488. static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
  489. struct btrfs_fs_info *fs_info,
  490. struct btrfs_pending_snapshot *pending)
  491. {
  492. struct btrfs_key key;
  493. struct btrfs_root_item *new_root_item;
  494. struct btrfs_root *tree_root = fs_info->tree_root;
  495. struct btrfs_root *root = pending->root;
  496. struct extent_buffer *tmp;
  497. struct extent_buffer *old;
  498. int ret;
  499. int namelen;
  500. u64 objectid;
  501. new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
  502. if (!new_root_item) {
  503. ret = -ENOMEM;
  504. goto fail;
  505. }
  506. ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid);
  507. if (ret)
  508. goto fail;
  509. memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
  510. key.objectid = objectid;
  511. key.offset = 1;
  512. btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
  513. old = btrfs_lock_root_node(root);
  514. btrfs_cow_block(trans, root, old, NULL, 0, &old);
  515. btrfs_copy_root(trans, root, old, &tmp, objectid);
  516. btrfs_tree_unlock(old);
  517. free_extent_buffer(old);
  518. btrfs_set_root_bytenr(new_root_item, tmp->start);
  519. btrfs_set_root_level(new_root_item, btrfs_header_level(tmp));
  520. ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
  521. new_root_item);
  522. btrfs_tree_unlock(tmp);
  523. free_extent_buffer(tmp);
  524. if (ret)
  525. goto fail;
  526. /*
  527. * insert the directory item
  528. */
  529. key.offset = (u64)-1;
  530. namelen = strlen(pending->name);
  531. ret = btrfs_insert_dir_item(trans, root->fs_info->tree_root,
  532. pending->name, namelen,
  533. root->fs_info->sb->s_root->d_inode->i_ino,
  534. &key, BTRFS_FT_DIR, 0);
  535. if (ret)
  536. goto fail;
  537. ret = btrfs_insert_inode_ref(trans, root->fs_info->tree_root,
  538. pending->name, strlen(pending->name), objectid,
  539. root->fs_info->sb->s_root->d_inode->i_ino, 0);
  540. /* Invalidate existing dcache entry for new snapshot. */
  541. btrfs_invalidate_dcache_root(root, pending->name, namelen);
  542. fail:
  543. kfree(new_root_item);
  544. return ret;
  545. }
  546. static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
  547. struct btrfs_fs_info *fs_info)
  548. {
  549. struct btrfs_pending_snapshot *pending;
  550. struct list_head *head = &trans->transaction->pending_snapshots;
  551. int ret;
  552. while(!list_empty(head)) {
  553. pending = list_entry(head->next,
  554. struct btrfs_pending_snapshot, list);
  555. ret = create_pending_snapshot(trans, fs_info, pending);
  556. BUG_ON(ret);
  557. list_del(&pending->list);
  558. kfree(pending->name);
  559. kfree(pending);
  560. }
  561. return 0;
  562. }
  563. int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
  564. struct btrfs_root *root)
  565. {
  566. unsigned long joined = 0;
  567. unsigned long timeout = 1;
  568. struct btrfs_transaction *cur_trans;
  569. struct btrfs_transaction *prev_trans = NULL;
  570. struct btrfs_root *chunk_root = root->fs_info->chunk_root;
  571. struct list_head dirty_fs_roots;
  572. struct extent_io_tree *pinned_copy;
  573. DEFINE_WAIT(wait);
  574. int ret;
  575. INIT_LIST_HEAD(&dirty_fs_roots);
  576. mutex_lock(&root->fs_info->trans_mutex);
  577. if (trans->transaction->in_commit) {
  578. cur_trans = trans->transaction;
  579. trans->transaction->use_count++;
  580. mutex_unlock(&root->fs_info->trans_mutex);
  581. btrfs_end_transaction(trans, root);
  582. ret = wait_for_commit(root, cur_trans);
  583. BUG_ON(ret);
  584. mutex_lock(&root->fs_info->trans_mutex);
  585. put_transaction(cur_trans);
  586. mutex_unlock(&root->fs_info->trans_mutex);
  587. return 0;
  588. }
  589. pinned_copy = kmalloc(sizeof(*pinned_copy), GFP_NOFS);
  590. if (!pinned_copy)
  591. return -ENOMEM;
  592. extent_io_tree_init(pinned_copy,
  593. root->fs_info->btree_inode->i_mapping, GFP_NOFS);
  594. trans->transaction->in_commit = 1;
  595. trans->transaction->blocked = 1;
  596. cur_trans = trans->transaction;
  597. if (cur_trans->list.prev != &root->fs_info->trans_list) {
  598. prev_trans = list_entry(cur_trans->list.prev,
  599. struct btrfs_transaction, list);
  600. if (!prev_trans->commit_done) {
  601. prev_trans->use_count++;
  602. mutex_unlock(&root->fs_info->trans_mutex);
  603. wait_for_commit(root, prev_trans);
  604. mutex_lock(&root->fs_info->trans_mutex);
  605. put_transaction(prev_trans);
  606. }
  607. }
  608. do {
  609. joined = cur_trans->num_joined;
  610. WARN_ON(cur_trans != trans->transaction);
  611. prepare_to_wait(&cur_trans->writer_wait, &wait,
  612. TASK_UNINTERRUPTIBLE);
  613. if (cur_trans->num_writers > 1)
  614. timeout = MAX_SCHEDULE_TIMEOUT;
  615. else
  616. timeout = 1;
  617. mutex_unlock(&root->fs_info->trans_mutex);
  618. schedule_timeout(timeout);
  619. mutex_lock(&root->fs_info->trans_mutex);
  620. finish_wait(&cur_trans->writer_wait, &wait);
  621. } while (cur_trans->num_writers > 1 ||
  622. (cur_trans->num_joined != joined));
  623. ret = create_pending_snapshots(trans, root->fs_info);
  624. BUG_ON(ret);
  625. WARN_ON(cur_trans != trans->transaction);
  626. ret = add_dirty_roots(trans, &root->fs_info->fs_roots_radix,
  627. &dirty_fs_roots);
  628. BUG_ON(ret);
  629. ret = btrfs_commit_tree_roots(trans, root);
  630. BUG_ON(ret);
  631. cur_trans = root->fs_info->running_transaction;
  632. spin_lock(&root->fs_info->new_trans_lock);
  633. root->fs_info->running_transaction = NULL;
  634. spin_unlock(&root->fs_info->new_trans_lock);
  635. btrfs_set_super_generation(&root->fs_info->super_copy,
  636. cur_trans->transid);
  637. btrfs_set_super_root(&root->fs_info->super_copy,
  638. root->fs_info->tree_root->node->start);
  639. btrfs_set_super_root_level(&root->fs_info->super_copy,
  640. btrfs_header_level(root->fs_info->tree_root->node));
  641. btrfs_set_super_chunk_root(&root->fs_info->super_copy,
  642. chunk_root->node->start);
  643. btrfs_set_super_chunk_root_level(&root->fs_info->super_copy,
  644. btrfs_header_level(chunk_root->node));
  645. memcpy(&root->fs_info->super_for_commit, &root->fs_info->super_copy,
  646. sizeof(root->fs_info->super_copy));
  647. btrfs_copy_pinned(root, pinned_copy);
  648. trans->transaction->blocked = 0;
  649. wake_up(&root->fs_info->transaction_throttle);
  650. wake_up(&root->fs_info->transaction_wait);
  651. mutex_unlock(&root->fs_info->trans_mutex);
  652. ret = btrfs_write_and_wait_transaction(trans, root);
  653. BUG_ON(ret);
  654. write_ctree_super(trans, root);
  655. btrfs_finish_extent_commit(trans, root, pinned_copy);
  656. mutex_lock(&root->fs_info->trans_mutex);
  657. kfree(pinned_copy);
  658. cur_trans->commit_done = 1;
  659. root->fs_info->last_trans_committed = cur_trans->transid;
  660. wake_up(&cur_trans->commit_wait);
  661. put_transaction(cur_trans);
  662. put_transaction(cur_trans);
  663. if (root->fs_info->closing)
  664. list_splice_init(&root->fs_info->dead_roots, &dirty_fs_roots);
  665. else
  666. list_splice_init(&dirty_fs_roots, &root->fs_info->dead_roots);
  667. mutex_unlock(&root->fs_info->trans_mutex);
  668. kmem_cache_free(btrfs_trans_handle_cachep, trans);
  669. if (root->fs_info->closing) {
  670. drop_dirty_roots(root->fs_info->tree_root, &dirty_fs_roots);
  671. }
  672. return ret;
  673. }
  674. int btrfs_clean_old_snapshots(struct btrfs_root *root)
  675. {
  676. struct list_head dirty_roots;
  677. INIT_LIST_HEAD(&dirty_roots);
  678. again:
  679. mutex_lock(&root->fs_info->trans_mutex);
  680. list_splice_init(&root->fs_info->dead_roots, &dirty_roots);
  681. mutex_unlock(&root->fs_info->trans_mutex);
  682. if (!list_empty(&dirty_roots)) {
  683. drop_dirty_roots(root, &dirty_roots);
  684. goto again;
  685. }
  686. return 0;
  687. }