/* transaction.c */

#include <linux/module.h>
#include <linux/fs.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"

static int total_trans = 0;
extern struct kmem_cache *btrfs_trans_handle_cachep;
extern struct kmem_cache *btrfs_transaction_cachep;

static struct workqueue_struct *trans_wq;

#define BTRFS_ROOT_TRANS_TAG 0
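
/*
 * Drop one reference on a transaction.  When the count hits zero the
 * transaction is removed from fs_info->trans_list and freed back to
 * the btrfs_transaction_cachep slab.  Callers hold trans_mutex.
 */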
static void put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(transaction->use_count == 0);
	transaction->use_count--;
	if (transaction->use_count == 0) {
		WARN_ON(total_trans == 0);
		total_trans--;
		list_del_init(&transaction->list);
		memset(transaction, 0, sizeof(*transaction));
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}
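
/*
 * Join the currently running transaction, creating a new one if none
 * is running.  A new transaction gets the next generation number and
 * becomes fs_info->running_transaction.  Caller holds trans_mutex.
 */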
static int join_transaction(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	cur_trans = root->fs_info->running_transaction;
	if (!cur_trans) {
		cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
					     GFP_NOFS);
		total_trans++;
		BUG_ON(!cur_trans);
		root->fs_info->generation++;
		root->fs_info->running_transaction = cur_trans;
		cur_trans->num_writers = 0;
		cur_trans->transid = root->fs_info->generation;
		init_waitqueue_head(&cur_trans->writer_wait);
		init_waitqueue_head(&cur_trans->commit_wait);
		cur_trans->in_commit = 0;
		cur_trans->use_count = 1;
		cur_trans->commit_done = 0;
		cur_trans->start_time = get_seconds();
		list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
		init_bit_radix(&cur_trans->dirty_pages);
	}
	cur_trans->num_writers++;
	return 0;
}
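
/*
 * Start a transaction handle on @root.  The first time a subvolume
 * root joins a given transaction, its current node is saved as the
 * commit_root and the root is tagged in fs_roots_radix so it can be
 * picked up at commit time.
 */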
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_blocks)
{
	struct btrfs_trans_handle *h =
		kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	int ret;
	u64 running_trans_id;

	mutex_lock(&root->fs_info->trans_mutex);
	ret = join_transaction(root);
	BUG_ON(ret);
	running_trans_id = root->fs_info->running_transaction->transid;

	if (root != root->fs_info->tree_root && root->last_trans <
	    running_trans_id) {
		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		root->commit_root = root->node;
		get_bh(root->node);
	}
	root->last_trans = running_trans_id;
	h->transid = running_trans_id;
	h->transaction = root->fs_info->running_transaction;
	h->blocks_reserved = num_blocks;
	h->blocks_used = 0;
	h->block_group = NULL;
	root->fs_info->running_transaction->use_count++;
	mutex_unlock(&root->fs_info->trans_mutex);
	return h;
}
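
/*
 * End a transaction handle: drop this writer's count, wake anyone in
 * the commit path waiting for writers to finish, and release the
 * handle's reference on the running transaction.
 */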
int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	mutex_lock(&root->fs_info->trans_mutex);
	cur_trans = root->fs_info->running_transaction;
	WARN_ON(cur_trans->num_writers < 1);
	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	cur_trans->num_writers--;
	put_transaction(cur_trans);
	mutex_unlock(&root->fs_info->trans_mutex);
	memset(trans, 0, sizeof(*trans));
	kmem_cache_free(btrfs_trans_handle_cachep, trans);
	return 0;
}
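
/*
 * Write every dirty btree page recorded in the transaction's
 * dirty_pages bit radix and wait for the IO to finish.  With no
 * transaction, just flush the whole btree inode mapping.
 */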
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	unsigned long gang[16];
	int ret;
	int i;
	int err;
	int werr = 0;
	struct page *page;
	struct radix_tree_root *dirty_pages;
	struct inode *btree_inode = root->fs_info->btree_inode;

	if (!trans || !trans->transaction) {
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	dirty_pages = &trans->transaction->dirty_pages;
	while(1) {
		ret = find_first_radix_bit(dirty_pages, gang,
					   0, ARRAY_SIZE(gang));
		if (!ret)
			break;
		for (i = 0; i < ret; i++) {
			/* FIXME EIO */
			clear_radix_bit(dirty_pages, gang[i]);
			page = find_lock_page(btree_inode->i_mapping,
					      gang[i]);
			if (!page)
				continue;
			err = write_one_page(page, 0);
			if (err)
				werr = err;
			page_cache_release(page);
		}
	}
	err = filemap_fdatawait(btree_inode->i_mapping);
	if (err)
		werr = err;
	return werr;
}
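
/*
 * Push the extent root's block pointer into the tree root.  Updating
 * the extent root can itself allocate and free blocks, so loop until
 * the root_item's blocknr stops changing.
 */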
int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	int ret;
	u64 old_extent_block;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *extent_root = fs_info->extent_root;

	btrfs_write_dirty_block_groups(trans, extent_root);
	while(1) {
		old_extent_block = btrfs_root_blocknr(&extent_root->root_item);
		if (old_extent_block == bh_blocknr(extent_root->node))
			break;
		btrfs_set_root_blocknr(&extent_root->root_item,
				       bh_blocknr(extent_root->node));
		ret = btrfs_update_root(trans, tree_root,
					&extent_root->root_key,
					&extent_root->root_item);
		BUG_ON(ret);
		btrfs_write_dirty_block_groups(trans, extent_root);
	}
	return 0;
}
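
/*
 * Sleep until the given transaction's commit has finished.  The
 * trans_mutex is dropped while sleeping and retaken before returning.
 */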
static int wait_for_commit(struct btrfs_root *root,
			   struct btrfs_transaction *commit)
{
	DEFINE_WAIT(wait);

	while(!commit->commit_done) {
		prepare_to_wait(&commit->commit_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (commit->commit_done)
			break;
		mutex_unlock(&root->fs_info->trans_mutex);
		schedule();
		mutex_lock(&root->fs_info->trans_mutex);
	}
	finish_wait(&commit->commit_wait, &wait);
	return 0;
}
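
/*
 * A root whose old commit_root must be dropped once the new version
 * of the root has been committed to disk.
 */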
struct dirty_root {
	struct list_head list;
	struct btrfs_key snap_key;
	struct buffer_head *commit_root;
	struct btrfs_root *root;
};
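
/*
 * Walk every fs root tagged in this transaction.  Roots that actually
 * changed get a new key (offset = current generation) inserted into
 * the tree root, and their old commit_root is queued on @list so the
 * stale snapshot can be dropped after the commit.
 */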
static int add_dirty_roots(struct btrfs_trans_handle *trans,
			   struct radix_tree_root *radix,
			   struct list_head *list)
{
	struct dirty_root *dirty;
	struct btrfs_root *gang[8];
	struct btrfs_root *root;
	int i;
	int ret;
	int err;

	while(1) {
		ret = radix_tree_gang_lookup_tag(radix, (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(radix,
				     (unsigned long)root->root_key.objectid,
				     BTRFS_ROOT_TRANS_TAG);
			if (root->commit_root == root->node) {
				WARN_ON(bh_blocknr(root->node) !=
					btrfs_root_blocknr(&root->root_item));
				brelse(root->commit_root);
				root->commit_root = NULL;
				continue;
			}
			dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
			BUG_ON(!dirty);
			memcpy(&dirty->snap_key, &root->root_key,
			       sizeof(root->root_key));
			dirty->commit_root = root->commit_root;
			root->commit_root = NULL;
			dirty->root = root;
			root->root_key.offset = root->fs_info->generation;
			btrfs_set_root_blocknr(&root->root_item,
					       bh_blocknr(root->node));
			err = btrfs_insert_root(trans, root->fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			BUG_ON(err);
			list_add(&dirty->list, list);
		}
	}
	return 0;
}
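
/*
 * For each queued dirty root, drop the old snapshot blocks and delete
 * the stale root item from the tree root.  Each drop runs inside its
 * own transaction.
 */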
static int drop_dirty_roots(struct btrfs_root *tree_root,
			    struct list_head *list)
{
	struct dirty_root *dirty;
	struct btrfs_trans_handle *trans;
	int ret;

	while(!list_empty(list)) {
		mutex_lock(&tree_root->fs_info->fs_mutex);
		dirty = list_entry(list->next, struct dirty_root, list);
		list_del_init(&dirty->list);
		trans = btrfs_start_transaction(tree_root, 1);
		ret = btrfs_drop_snapshot(trans, dirty->root,
					  dirty->commit_root);
		BUG_ON(ret);
		ret = btrfs_del_root(trans, tree_root, &dirty->snap_key);
		BUG_ON(ret);
		ret = btrfs_end_transaction(trans, tree_root);
		BUG_ON(ret);
		kfree(dirty);
		mutex_unlock(&tree_root->fs_info->fs_mutex);
	}
	return 0;
}
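
/*
 * Commit the running transaction: wait for other writers to finish,
 * update the fs and tree roots, write the dirty btree pages, then
 * write the super block and wake anyone waiting on the commit.
 * Called with fs_mutex held; trans_mutex is taken internally.
 */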
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	int ret = 0;
	struct btrfs_transaction *cur_trans;
	struct btrfs_transaction *prev_trans = NULL;
	struct list_head dirty_fs_roots;
	DEFINE_WAIT(wait);

	INIT_LIST_HEAD(&dirty_fs_roots);

	mutex_lock(&root->fs_info->trans_mutex);
	if (trans->transaction->in_commit) {
		/*
		 * Someone else is already committing this transaction.
		 * Drop trans_mutex before btrfs_end_transaction, which
		 * takes it itself, then wait for that commit to finish.
		 */
		cur_trans = trans->transaction;
		trans->transaction->use_count++;
		mutex_unlock(&root->fs_info->trans_mutex);
		btrfs_end_transaction(trans, root);

		mutex_lock(&root->fs_info->trans_mutex);
		ret = wait_for_commit(root, cur_trans);
		BUG_ON(ret);
		put_transaction(cur_trans);
		mutex_unlock(&root->fs_info->trans_mutex);
		return 0;
	}
	cur_trans = trans->transaction;
	trans->transaction->in_commit = 1;

	/* wait for every other writer on this transaction to finish */
	while (trans->transaction->num_writers > 1) {
		WARN_ON(cur_trans != trans->transaction);
		prepare_to_wait(&trans->transaction->writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (trans->transaction->num_writers <= 1)
			break;
		mutex_unlock(&root->fs_info->trans_mutex);
		schedule();
		mutex_lock(&root->fs_info->trans_mutex);
		finish_wait(&trans->transaction->writer_wait, &wait);
	}
	finish_wait(&trans->transaction->writer_wait, &wait);
	WARN_ON(cur_trans != trans->transaction);

	add_dirty_roots(trans, &root->fs_info->fs_roots_radix, &dirty_fs_roots);
	ret = btrfs_commit_tree_roots(trans, root);
	BUG_ON(ret);

	cur_trans = root->fs_info->running_transaction;
	root->fs_info->running_transaction = NULL;

	/*
	 * if the previous transaction has not finished committing,
	 * wait for it before writing the new super block
	 */
	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (prev_trans->commit_done)
			prev_trans = NULL;
		else
			prev_trans->use_count++;
	}
	mutex_unlock(&root->fs_info->trans_mutex);
	mutex_unlock(&root->fs_info->fs_mutex);

	ret = btrfs_write_and_wait_transaction(trans, root);
	if (prev_trans) {
		mutex_lock(&root->fs_info->trans_mutex);
		wait_for_commit(root, prev_trans);
		put_transaction(prev_trans);
		mutex_unlock(&root->fs_info->trans_mutex);
	}

	btrfs_set_super_generation(root->fs_info->disk_super,
				   cur_trans->transid);
	BUG_ON(ret);
	write_ctree_super(trans, root);

	mutex_lock(&root->fs_info->fs_mutex);
	btrfs_finish_extent_commit(trans, root);
	mutex_lock(&root->fs_info->trans_mutex);
	cur_trans->commit_done = 1;
	wake_up(&cur_trans->commit_wait);

	/*
	 * drop both the reference taken when the transaction was
	 * created and the one held by this handle
	 */
	put_transaction(cur_trans);
	put_transaction(cur_trans);

	if (root->fs_info->closing)
		list_splice_init(&root->fs_info->dead_roots, &dirty_fs_roots);
	else
		list_splice_init(&dirty_fs_roots, &root->fs_info->dead_roots);
	mutex_unlock(&root->fs_info->trans_mutex);
	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (root->fs_info->closing) {
		mutex_unlock(&root->fs_info->fs_mutex);
		drop_dirty_roots(root->fs_info->tree_root, &dirty_fs_roots);
		mutex_lock(&root->fs_info->fs_mutex);
	}
	return ret;
}
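
/*
 * Delayed work that commits the running transaction once it is more
 * than 30 seconds old, then drops any dead roots left over from
 * earlier commits.
 */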
void btrfs_transaction_cleaner(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info = container_of(work,
						     struct btrfs_fs_info,
						     trans_work.work);
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_transaction *cur;
	struct btrfs_trans_handle *trans;
	struct list_head dirty_roots;
	unsigned long now;
	unsigned long delay = HZ * 30;
	int ret;

	INIT_LIST_HEAD(&dirty_roots);
	mutex_lock(&root->fs_info->fs_mutex);
	mutex_lock(&root->fs_info->trans_mutex);
	cur = root->fs_info->running_transaction;
	if (!cur) {
		mutex_unlock(&root->fs_info->trans_mutex);
		goto out;
	}
	now = get_seconds();
	if (now < cur->start_time || now - cur->start_time < 30) {
		mutex_unlock(&root->fs_info->trans_mutex);
		delay = HZ * 5;
		goto out;
	}
	mutex_unlock(&root->fs_info->trans_mutex);
	trans = btrfs_start_transaction(root, 1);
	ret = btrfs_commit_transaction(trans, root);
out:
	mutex_unlock(&root->fs_info->fs_mutex);
	mutex_lock(&root->fs_info->trans_mutex);
	list_splice_init(&root->fs_info->dead_roots, &dirty_roots);
	mutex_unlock(&root->fs_info->trans_mutex);
	if (!list_empty(&dirty_roots)) {
		drop_dirty_roots(root, &dirty_roots);
	}
	btrfs_transaction_queue_work(root, delay);
}
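
/* helpers for scheduling and flushing the per-fs transaction work */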
void btrfs_transaction_queue_work(struct btrfs_root *root, int delay)
{
	queue_delayed_work(trans_wq, &root->fs_info->trans_work, delay);
}

void btrfs_transaction_flush_work(struct btrfs_root *root)
{
	cancel_rearming_delayed_workqueue(trans_wq, &root->fs_info->trans_work);
	flush_workqueue(trans_wq);
}

void __init btrfs_init_transaction_sys(void)
{
	trans_wq = create_workqueue("btrfs");
}

void __exit btrfs_exit_transaction_sys(void)
{
	destroy_workqueue(trans_wq);
}