transaction.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"
#include "dev-replace.h"

#define BTRFS_ROOT_TRANS_TAG 0

void put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(atomic_read(&transaction->use_count) == 0);
	if (atomic_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(transaction->delayed_refs.root.rb_node);
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}

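/*
 * A minimal usage sketch (illustrative, not extra code in this file):
 * every reference taken with atomic_inc(&trans->use_count) while the
 * transaction is pinned must be balanced by put_transaction() once the
 * caller is done with it, e.g. the pattern the waiters below use:
 *
 *	atomic_inc(&cur_trans->use_count);
 *	spin_unlock(&fs_info->trans_lock);
 *	wait_for_commit(root, cur_trans);
 *	put_transaction(cur_trans);
 */
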
static noinline void switch_commit_root(struct btrfs_root *root)
{
	free_extent_buffer(root->commit_root);
	root->commit_root = btrfs_root_node(root);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, int type)
{
	struct btrfs_transaction *cur_trans;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
loop:
	/* The file system has been taken offline. No new transactions. */
	if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		spin_unlock(&fs_info->trans_lock);
		return -EROFS;
	}

	if (fs_info->trans_no_join) {
		/*
		 * If we are JOIN_NOLOCK we're already committing a current
		 * transaction, we just need a handle to deal with something
		 * when committing the transaction, such as inode cache and
		 * space cache. It is a special case.
		 */
		if (type != TRANS_JOIN_NOLOCK) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
	}

	cur_trans = fs_info->running_transaction;
	if (cur_trans) {
		if (cur_trans->aborted) {
			spin_unlock(&fs_info->trans_lock);
			return cur_trans->aborted;
		}
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		cur_trans->num_joined++;
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * If we are ATTACH, we just want to catch the current transaction,
	 * and commit it. If there is no transaction, just return ENOENT.
	 */
	if (type == TRANS_ATTACH)
		return -ENOENT;

	cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&fs_info->trans_lock);
	if (fs_info->running_transaction) {
		/*
		 * someone started a transaction after we unlocked.  Make sure
		 * to redo the trans_no_join checks above
		 */
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		cur_trans = fs_info->running_transaction;
		goto loop;
	} else if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		spin_unlock(&fs_info->trans_lock);
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		return -EROFS;
	}

	atomic_set(&cur_trans->num_writers, 1);
	cur_trans->num_joined = 0;
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->in_commit = 0;
	cur_trans->blocked = 0;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	atomic_set(&cur_trans->use_count, 2);
	cur_trans->commit_done = 0;
	cur_trans->start_time = get_seconds();

	cur_trans->delayed_refs.root = RB_ROOT;
	cur_trans->delayed_refs.num_entries = 0;
	cur_trans->delayed_refs.num_heads_ready = 0;
	cur_trans->delayed_refs.num_heads = 0;
	cur_trans->delayed_refs.flushing = 0;
	cur_trans->delayed_refs.run_delayed_start = 0;

	/*
	 * although the tree mod log is per file system and not per transaction,
	 * the log must never go across transaction boundaries.
	 */
	smp_mb();
	if (!list_empty(&fs_info->tree_mod_seq_list))
		WARN(1, KERN_ERR "btrfs: tree_mod_seq_list not empty when "
			"creating a fresh transaction\n");
	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
		WARN(1, KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
			"creating a fresh transaction\n");
	atomic_set(&fs_info->tree_mod_seq, 0);

	spin_lock_init(&cur_trans->commit_lock);
	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	list_add_tail(&cur_trans->list, &fs_info->trans_list);
	extent_io_tree_init(&cur_trans->dirty_pages,
			     fs_info->btree_inode->i_mapping);
	fs_info->generation++;
	cur_trans->transid = fs_info->generation;
	fs_info->running_transaction = cur_trans;
	cur_trans->aborted = 0;
	spin_unlock(&fs_info->trans_lock);

	return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (root->ref_cows && root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->commit_root != root->node);

		/*
		 * see below for in_trans_setup usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		root->in_trans_setup = 1;

		/* make sure readers find in_trans_setup before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&root->fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid) {
			spin_unlock(&root->fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
			   (unsigned long)root->root_key.objectid,
			   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&root->fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky.  We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root->in_trans_setup.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock.  smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below
		 */
		btrfs_init_reloc_root(trans, root);
		smp_wmb();
		root->in_trans_setup = 0;
	}
	return 0;
}

int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!root->ref_cows)
		return 0;

	/*
	 * see record_root_in_trans for comments about in_trans_setup usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !root->in_trans_setup)
		return 0;

	mutex_lock(&root->fs_info->reloc_mutex);
	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->reloc_mutex);

	return 0;
}

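/*
 * Illustrative ordering sketch for the lockless fast path above (not
 * extra code in this file); the writer is record_root_in_trans(), the
 * reader is btrfs_record_root_in_trans():
 *
 *	writer				reader
 *	in_trans_setup = 1;
 *	smp_wmb();			smp_rmb();
 *	last_trans = transid;		sees last_trans == transid but
 *	btrfs_init_reloc_root();	in_trans_setup still 1, so it
 *	smp_wmb();			falls back to taking reloc_mutex
 *	in_trans_setup = 0;		instead of returning early
 */
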
/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && cur_trans->blocked) {
		atomic_inc(&cur_trans->use_count);
		spin_unlock(&root->fs_info->trans_lock);

		wait_event(root->fs_info->transaction_wait,
			   !cur_trans->blocked);
		put_transaction(cur_trans);
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}
}

static int may_wait_transaction(struct btrfs_root *root, int type)
{
	if (root->fs_info->log_root_recovering)
		return 0;

	if (type == TRANS_USERSPACE)
		return 1;

	if (type == TRANS_START &&
	    !atomic_read(&root->fs_info->open_ioctl_trans))
		return 1;

	return 0;
}

static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, u64 num_items, int type,
		  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	int ret;
	u64 qgroup_reserved = 0;

	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
		h = current->journal_info;
		h->use_count++;
		WARN_ON(h->use_count > 2);
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items > 0 && root != root->fs_info->chunk_root) {
		if (root->fs_info->quota_enabled &&
		    is_fstree(root->root_key.objectid)) {
			qgroup_reserved = num_items * root->leafsize;
			ret = btrfs_qgroup_reserve(root, qgroup_reserved);
			if (ret)
				return ERR_PTR(ret);
		}

		num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
		ret = btrfs_block_rsv_add(root,
					  &root->fs_info->trans_block_rsv,
					  num_bytes, flush);
		if (ret)
			goto reserve_fail;
	}
again:
	h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	/*
	 * If we are JOIN_NOLOCK we're already committing a transaction and
	 * waiting on this guy, so we don't need to do the sb_start_intwrite
	 * because we're already holding a ref.  We need this because we could
	 * have raced in and done an fsync() on a file which can kick a commit
	 * and then we deadlock with somebody doing a freeze.
	 *
	 * If we are ATTACH, it means we just want to catch the current
	 * transaction and commit it, so we needn't do sb_start_intwrite().
	 */
	if (type < TRANS_JOIN_NOLOCK)
		sb_start_intwrite(root->fs_info->sb);

	if (may_wait_transaction(root, type))
		wait_current_trans(root);

	do {
		ret = join_transaction(root, type);
		if (ret == -EBUSY)
			wait_current_trans(root);
	} while (ret == -EBUSY);

	if (ret < 0) {
		/* We must get the transaction if we are JOIN_NOLOCK. */
		BUG_ON(type == TRANS_JOIN_NOLOCK);
		goto join_fail;
	}

	cur_trans = root->fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->blocks_used = 0;
	h->bytes_reserved = 0;
	h->root = root;
	h->delayed_ref_updates = 0;
	h->use_count = 1;
	h->adding_csums = 0;
	h->block_rsv = NULL;
	h->orig_rsv = NULL;
	h->aborted = 0;
	h->qgroup_reserved = qgroup_reserved;
	h->delayed_ref_elem.seq = 0;
	h->type = type;
	INIT_LIST_HEAD(&h->qgroup_ref_list);
	INIT_LIST_HEAD(&h->new_bgs);

	smp_mb();
	if (cur_trans->blocked && may_wait_transaction(root, type)) {
		btrfs_commit_transaction(h, root);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(root->fs_info, "transaction",
					      h->transid, num_bytes, 1);
		h->block_rsv = &root->fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
	}

got_it:
	btrfs_record_root_in_trans(h, root);

	if (!current->journal_info && type != TRANS_USERSPACE)
		current->journal_info = h;
	return h;

join_fail:
	if (type < TRANS_JOIN_NOLOCK)
		sb_end_intwrite(root->fs_info->sb);
	kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
	if (num_bytes)
		btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
					num_bytes);
reserve_fail:
	if (qgroup_reserved)
		btrfs_qgroup_free(root, qgroup_reserved);
	return ERR_PTR(ret);
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL);
}

struct btrfs_trans_handle *btrfs_start_transaction_lflush(
					struct btrfs_root *root, int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_LIMIT);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN, 0);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_USERSPACE, 0);
}

struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_ATTACH, 0);
}

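/*
 * Illustrative caller sketch for the wrappers above (not part of this
 * file; error handling trimmed).  The num_items argument sizes the
 * metadata reservation, here one item's worth:
 *
 *	struct btrfs_trans_handle *trans;
 *
 *	trans = btrfs_start_transaction(root, 1);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	... modify the trees under this handle ...
 *	return btrfs_end_transaction(trans, root);
 */
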
/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
				    struct btrfs_transaction *commit)
{
	wait_event(commit->commit_wait, commit->commit_done);
}

int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret = 0;

	if (transid) {
		if (transid <= root->fs_info->last_trans_committed)
			goto out;

		ret = -EINVAL;
		/* find specified transaction */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry(t, &root->fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				ret = 0;
				break;
			}
			if (t->transid > transid) {
				ret = 0;
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		/* The specified transaction doesn't exist */
		if (!cur_trans)
			goto out;
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry_reverse(t, &root->fs_info->trans_list,
					    list) {
			if (t->in_commit) {
				if (t->commit_done)
					break;
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(root, cur_trans);
	put_transaction(cur_trans);
out:
	return ret;
}

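/*
 * Illustrative sketch (not extra code in this file): passing
 * transid == 0 waits for whatever commit is already in flight, if any;
 * passing a specific generation waits for that commit and fails with
 * -EINVAL if no such transaction ever existed:
 *
 *	ret = btrfs_wait_for_commit(root, 0);
 *	ret = btrfs_wait_for_commit(root, transid);
 */
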
void btrfs_throttle(struct btrfs_root *root)
{
	if (!atomic_read(&root->fs_info->open_ioctl_trans))
		wait_current_trans(root);
}

static int should_end_transaction(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	int ret;

	ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
	return ret ? 1 : 0;
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	int updates;
	int err;

	smp_mb();
	if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
		return 1;

	updates = trans->delayed_ref_updates;
	trans->delayed_ref_updates = 0;
	if (updates) {
		err = btrfs_run_delayed_refs(trans, root, updates);
		if (err) /* Error code will also eval true */
			return err;
	}

	return should_end_transaction(trans, root);
}

static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, int throttle)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *info = root->fs_info;
	int count = 0;
	int lock = (trans->type != TRANS_JOIN_NOLOCK);
	int err = 0;

	if (--trans->use_count) {
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	/*
	 * do the qgroup accounting as early as possible
	 */
	err = btrfs_delayed_refs_qgroup_accounting(trans, info);

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;
	/*
	 * the same root has to be passed to start_transaction and
	 * end_transaction. Subvolume quota depends on this.
	 */
	WARN_ON(trans->root != root);

	if (trans->qgroup_reserved) {
		btrfs_qgroup_free(root, trans->qgroup_reserved);
		trans->qgroup_reserved = 0;
	}

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	while (count < 2) {
		unsigned long cur = trans->delayed_ref_updates;
		trans->delayed_ref_updates = 0;
		if (cur &&
		    trans->transaction->delayed_refs.num_heads_ready > 64) {
			trans->delayed_ref_updates = 0;
			btrfs_run_delayed_refs(trans, root, cur);
		} else {
			break;
		}
		count++;
	}
	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
	    should_end_transaction(trans, root)) {
		trans->transaction->blocked = 1;
		smp_wmb();
	}

	if (lock && cur_trans->blocked && !cur_trans->in_commit) {
		if (throttle) {
			/*
			 * We may race with somebody else here so end up having
			 * to call end_transaction on ourselves again, so inc
			 * our use_count.
			 */
			trans->use_count++;
			return btrfs_commit_transaction(trans, root);
		} else {
			wake_up_process(info->transaction_kthread);
		}
	}

	if (trans->type < TRANS_JOIN_NOLOCK)
		sb_end_intwrite(root->fs_info->sb);

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);

	smp_mb();
	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	if (throttle)
		btrfs_run_delayed_iputs(root);

	if (trans->aborted ||
	    root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		err = -EIO;
	}
	assert_qgroups_uptodate(trans);

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
	return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 0);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 1);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      mark, &cached_state)) {
		convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
				   mark, &cached_state, GFP_NOFS);
		cached_state = NULL;
		err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
			      struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      EXTENT_NEED_WAIT, &cached_state)) {
		clear_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
				 0, 0, &cached_state, GFP_NOFS);
		err = filemap_fdatawait_range(mapping, start, end);
		if (err)
			werr = err;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
				struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int ret2;

	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);

	if (ret)
		return ret;
	if (ret2)
		return ret2;
	return 0;
}

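/*
 * Illustrative sketch (not extra code in this file): callers that want
 * to overlap the writeback with other commit work can use the two
 * halves directly instead of the combined helper above:
 *
 *	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
 *	... do other work while the IO is in flight ...
 *	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
 */
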
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	if (!trans || !trans->transaction) {
		struct inode *btree_inode;
		btree_inode = root->fs_info->btree_inode;
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	return btrfs_write_and_wait_marked_extents(root,
					   &trans->transaction->dirty_pages,
					   EXTENT_DIRTY);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);
	btrfs_write_dirty_block_groups(trans, root);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			return ret;

		old_root_used = btrfs_root_used(&root->root_item);
		ret = btrfs_write_dirty_block_groups(trans, root);
		if (ret)
			return ret;
	}

	if (root != root->fs_info->extent_root)
		switch_commit_root(root);

	return 0;
}

/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	eb = btrfs_lock_root_node(fs_info->tree_root);
	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
			      0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	if (ret)
		return ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	ret = btrfs_run_dev_stats(trans, root->fs_info);
	WARN_ON(ret);
	ret = btrfs_run_dev_replace(trans, root->fs_info);
	WARN_ON(ret);

	ret = btrfs_run_qgroups(trans, root->fs_info);
	BUG_ON(ret);

	/* run_qgroups might have added some more refs */
	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);

		ret = update_cowonly_root(trans, root);
		if (ret)
			return ret;
	}

	down_write(&fs_info->extent_commit_sem);
	switch_commit_root(fs_info->extent_root);
	up_write(&fs_info->extent_commit_sem);

	btrfs_after_dev_replace_commit(fs_info);

	return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	list_add(&root->root_list, &root->fs_info->dead_roots);
	spin_unlock(&root->fs_info->trans_lock);
	return 0;
}

/*
 * update all the fs-tree roots on disk; the counterpart of
 * commit_cowonly_roots above for the reference counted roots
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);
			btrfs_orphan_commit_root(trans, root);

			btrfs_save_ino_cache(root, trans);

			/* see comments in should_cow_block() */
			root->force_cow = 0;
			smp_wmb();

			if (root->commit_root != root->node) {
				mutex_lock(&root->fs_commit_mutex);
				switch_commit_root(root);
				btrfs_unpin_free_ino(root);
				mutex_unlock(&root->fs_commit_mutex);

				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			spin_lock(&fs_info->fs_roots_radix_lock);
			if (err)
				break;
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return err;
}

/*
 * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;

	if (xchg(&root->defrag_running, 1))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root, cacheonly);

		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root);
		cond_resched();

		if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
			break;
	}
	root->defrag_running = 0;
	return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct btrfs_block_rsv *rsv;
	struct inode *parent_inode;
	struct btrfs_path *path;
	struct btrfs_dir_item *dir_item;
	struct dentry *parent;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	struct timespec cur_time = CURRENT_TIME;
	int ret;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;
	uuid_le new_uuid;

	path = btrfs_alloc_path();
	if (!path) {
		ret = pending->error = -ENOMEM;
		goto path_alloc_fail;
	}

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		ret = pending->error = -ENOMEM;
		goto root_item_alloc_fail;
	}

	ret = btrfs_find_free_objectid(tree_root, &objectid);
	if (ret) {
		pending->error = ret;
		goto no_free_objectid;
	}

	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);

	if (to_reserve > 0) {
		ret = btrfs_block_rsv_add(root, &pending->block_rsv,
					  to_reserve,
					  BTRFS_RESERVE_NO_FLUSH);
		if (ret) {
			pending->error = ret;
			goto no_free_objectid;
		}
	}

	ret = btrfs_qgroup_inherit(trans, fs_info, root->root_key.objectid,
				   objectid, pending->inherit);
	if (ret) {
		pending->error = ret;
		goto no_free_objectid;
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	rsv = trans->block_rsv;
	trans->block_rsv = &pending->block_rsv;

	dentry = pending->dentry;
	parent = dget_parent(dentry);
	parent_inode = parent->d_inode;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(parent_inode, &index);
	BUG_ON(ret); /* -ENOMEM */

	/* check if there is a file/dir which has the same name. */
	dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
					 btrfs_ino(parent_inode),
					 dentry->d_name.name,
					 dentry->d_name.len, 0);
	if (dir_item != NULL && !IS_ERR(dir_item)) {
		pending->error = -EEXIST;
		goto fail;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	btrfs_release_path(path);

	/*
	 * pull in the delayed directory update
	 * and the delayed inode item
	 * otherwise we corrupt the FS during
	 * snapshot
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {	/* Transaction aborted */
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	btrfs_set_root_generation_v2(new_root_item,
			trans->transid);
	uuid_le_gen(&new_uuid);
	memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
			BTRFS_UUID_SIZE);
	new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec);
	new_root_item->otime.nsec = cpu_to_le32(cur_time.tv_nsec);
	btrfs_set_root_otransid(new_root_item, trans->transid);
	memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
	memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
	btrfs_set_root_stransid(new_root_item, 0);
	btrfs_set_root_rtransid(new_root_item, 0);

	old = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
	if (ret) {
		btrfs_tree_unlock(old);
		free_extent_buffer(old);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_set_lock_blocking(old);

	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
	/* clean up in any case */
	btrfs_tree_unlock(old);
	free_extent_buffer(old);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/* see comments in should_cow_block() */
	root->force_cow = 1;
	smp_wmb();

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, tree_root, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(parent_inode), index,
				 dentry->d_name.name, dentry->d_name.len);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
	if (IS_ERR(pending->snap)) {
		ret = PTR_ERR(pending->snap);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_reloc_post_snapshot(trans, pending);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_insert_dir_item(trans, parent_root,
				    dentry->d_name.name, dentry->d_name.len,
				    parent_inode, &key,
				    BTRFS_FT_DIR, index);
	/* We have checked the name at the beginning, so it is impossible. */
	BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_i_size_write(parent_inode, parent_inode->i_size +
					 dentry->d_name.len * 2);
	parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
fail:
	dput(parent);
	trans->block_rsv = rsv;
no_free_objectid:
	kfree(new_root_item);
root_item_alloc_fail:
	btrfs_free_path(path);
path_alloc_fail:
	btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
	return ret;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending;
	struct list_head *head = &trans->transaction->pending_snapshots;

	list_for_each_entry(pending, head, list)
		create_pending_snapshot(trans, fs_info, pending);
	return 0;
}

static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (btrfs_test_opt(root, SPACE_CACHE))
		super->cache_generation = root_item->generation;
}

int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	int ret = 0;

	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->in_commit;
	spin_unlock(&info->trans_lock);
	return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	int ret = 0;

	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->blocked;
	spin_unlock(&info->trans_lock);
	return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
					    struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
					 struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_wait,
		   trans->commit_done || (trans->in_commit && !trans->blocked));
}

/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct btrfs_root *root;
	struct delayed_work work;
};

static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work.work);

	/*
	 * We've got freeze protection passed with the transaction.
	 * Tell lockdep about it.
	 */
	if (ac->newtrans->type < TRANS_JOIN_NOLOCK)
		rwsem_acquire_read(
		     &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
		     0, 1, _THIS_IP_);

	current->journal_info = ac->newtrans;

	btrfs_commit_transaction(ac->newtrans, ac->root);
	kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   int wait_for_unblock)
{
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_DELAYED_WORK(&ac->work, do_async_commit);
	ac->root = root;
	ac->newtrans = btrfs_join_transaction(root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);
		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	atomic_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans, root);

	/*
	 * Tell lockdep we've released the freeze rwsem, since the
	 * async commit thread will be the one to unlock it.
	 */
	if (trans->type < TRANS_JOIN_NOLOCK)
		rwsem_release(
			&root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
			1, _THIS_IP_);

	schedule_delayed_work(&ac->work, 0);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(root, cur_trans);
	else
		wait_current_trans_commit_start(root, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	put_transaction(cur_trans);
	return 0;
}

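/*
 * Illustrative caller sketch (not extra code in this file; error
 * handling trimmed): an async commit caller typically remembers the
 * transid before calling, since the handle is consumed.  With
 * wait_for_unblock == 1 the call returns once new transactions may
 * start again; with 0, as soon as the commit has begun:
 *
 *	u64 transid = trans->transid;
 *
 *	ret = btrfs_commit_transaction_async(trans, root, 1);
 *	... later, possibly from another task ...
 *	ret = btrfs_wait_for_commit(root, transid);
 */
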
static void cleanup_transaction(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, int err)
{
	struct btrfs_transaction *cur_trans = trans->transaction;

	WARN_ON(trans->use_count > 1);

	btrfs_abort_transaction(trans, root, err);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	if (cur_trans == root->fs_info->running_transaction) {
		root->fs_info->running_transaction = NULL;
		root->fs_info->trans_no_join = 0;
	}
	spin_unlock(&root->fs_info->trans_lock);

	btrfs_cleanup_one_transaction(trans->transaction, root);

	/* once for our handle's reference, once for the list reference */
	put_transaction(cur_trans);
	put_transaction(cur_trans);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
}

static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root)
{
	int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);
	int snap_pending = 0;
	int ret;

	if (!flush_on_commit) {
		spin_lock(&root->fs_info->trans_lock);
		if (!list_empty(&trans->transaction->pending_snapshots))
			snap_pending = 1;
		spin_unlock(&root->fs_info->trans_lock);
	}

	if (flush_on_commit || snap_pending) {
		btrfs_start_delalloc_inodes(root, 1);
		btrfs_wait_ordered_extents(root, 1);
	}

	ret = btrfs_run_delayed_items(trans, root);
	if (ret)
		return ret;

	/*
	 * running the delayed items may have added new refs. account
	 * them now so that they hinder processing of more delayed refs
	 * as little as possible.
	 */
	btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);

	/*
	 * rename doesn't use btrfs_join_transaction, so once we set the
	 * transaction to blocked above, we aren't going to get any new
	 * ordered operations.  We can safely run it here and know for
	 * sure that nothing new will be added to the list
	 */
	btrfs_run_ordered_operations(root, 1);

	return 0;
}

/*
 * btrfs_transaction state sequence:
 * in_commit = 0, blocked = 0  (initial)
 * in_commit = 1, blocked = 1
 * blocked = 0
 * commit_done = 1
 */
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	unsigned long joined = 0;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_transaction *prev_trans = NULL;
	DEFINE_WAIT(wait);
	int ret;
	int should_grow = 0;
	unsigned long now = get_seconds();

	ret = btrfs_run_ordered_operations(root, 0);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto cleanup_transaction;
	}

	/* Stop the commit early if ->aborted is set */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		goto cleanup_transaction;
	}

	/* make a pass through all the delayed refs we have so far
	 * any running procs may add more while we are here
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret)
		goto cleanup_transaction;

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	cur_trans = trans->transaction;

	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret)
		goto cleanup_transaction;

	spin_lock(&cur_trans->commit_lock);
	if (cur_trans->in_commit) {
		spin_unlock(&cur_trans->commit_lock);
		atomic_inc(&cur_trans->use_count);
		ret = btrfs_end_transaction(trans, root);

		wait_for_commit(root, cur_trans);

		put_transaction(cur_trans);

		return ret;
	}

	trans->transaction->in_commit = 1;
	trans->transaction->blocked = 1;
	spin_unlock(&cur_trans->commit_lock);
	wake_up(&root->fs_info->transaction_blocked_wait);

	spin_lock(&root->fs_info->trans_lock);
	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (!prev_trans->commit_done) {
			atomic_inc(&prev_trans->use_count);
			spin_unlock(&root->fs_info->trans_lock);

			wait_for_commit(root, prev_trans);

			put_transaction(prev_trans);
		} else {
			spin_unlock(&root->fs_info->trans_lock);
		}
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}

	if (!btrfs_test_opt(root, SSD) &&
	    (now < cur_trans->start_time || now - cur_trans->start_time < 1))
		should_grow = 1;

	do {
		joined = cur_trans->num_joined;

		WARN_ON(cur_trans != trans->transaction);

		ret = btrfs_flush_all_pending_stuffs(trans, root);
		if (ret)
			goto cleanup_transaction;

		prepare_to_wait(&cur_trans->writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);

		if (atomic_read(&cur_trans->num_writers) > 1)
			schedule_timeout(MAX_SCHEDULE_TIMEOUT);
		else if (should_grow)
			schedule_timeout(1);

		finish_wait(&cur_trans->writer_wait, &wait);
	} while (atomic_read(&cur_trans->num_writers) > 1 ||
		 (should_grow && cur_trans->num_joined != joined));

	ret = btrfs_flush_all_pending_stuffs(trans, root);
	if (ret)
		goto cleanup_transaction;

	/*
	 * Ok now we need to make sure to block out any other joins while we
	 * commit the transaction.  We could have started a join before setting
	 * no_join so make sure to wait for num_writers to == 1 again.
	 */
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->trans_no_join = 1;
	spin_unlock(&root->fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	/* ->aborted might be set after the previous check, so check it */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		goto cleanup_transaction;
	}
	/*
	 * the reloc mutex makes sure that we stop
	 * the balancing code from coming in and moving
	 * extents around in the middle of the commit
	 */
	mutex_lock(&root->fs_info->reloc_mutex);

	/*
	 * We needn't worry about the delayed items because we will
	 * deal with them in create_pending_snapshot(), which is the
	 * core function of the snapshot creation.
	 */
	ret = create_pending_snapshots(trans, root->fs_info);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/*
	 * We insert the dir indexes of the snapshots and update the inode
	 * of the snapshots' parents after the snapshot creation, so there
	 * are some delayed items which are not dealt with.  Now deal with
	 * them.
	 *
	 * We needn't worry that this operation will corrupt the snapshots,
	 * because all the trees which are snapshotted will be forced to COW
	 * the nodes and leaves.
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/*
	 * make sure none of the code above managed to slip in a
	 * delayed item
	 */
	btrfs_assert_delayed_root_empty(root);

	WARN_ON(cur_trans != trans->transaction);

	btrfs_scrub_pause(root);
	/* btrfs_commit_tree_roots is responsible for getting the
	 * various roots consistent with each other.  Every pointer
	 * in the tree of tree roots has to point to the most up to date
	 * root for every subvolume and other tree. So, we have to keep
	 * the tree logging code from jumping in and changing any
	 * of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in.  By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/* commit_fs_roots gets rid of all the tree log roots, it is now
	 * safe to free the root of tree log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	ret = commit_cowonly_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/*
	 * The tasks which save the space cache and inode cache may also
	 * update ->aborted, check it.
	 */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	btrfs_prepare_extent_commit(trans, root);

	cur_trans = root->fs_info->running_transaction;

	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	switch_commit_root(root->fs_info->tree_root);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	switch_commit_root(root->fs_info->chunk_root);

	assert_qgroups_uptodate(trans);
	update_super_roots(root);

	if (!root->fs_info->log_root_recovering) {
		btrfs_set_super_log_root(root->fs_info->super_copy, 0);
		btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
	}

	memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
	       sizeof(*root->fs_info->super_copy));

	trans->transaction->blocked = 0;
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->running_transaction = NULL;
	root->fs_info->trans_no_join = 0;
	spin_unlock(&root->fs_info->trans_lock);
	mutex_unlock(&root->fs_info->reloc_mutex);

	wake_up(&root->fs_info->transaction_wait);

	ret = btrfs_write_and_wait_transaction(trans, root);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Error while writing out transaction.");
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto cleanup_transaction;
	}

	ret = write_ctree_super(trans, root, 0);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto cleanup_transaction;
	}

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root);

	cur_trans->commit_done = 1;

	root->fs_info->last_trans_committed = cur_trans->transid;

	wake_up(&cur_trans->commit_wait);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&root->fs_info->trans_lock);

	/* once for our handle's reference, once for the list reference */
	put_transaction(cur_trans);
	put_transaction(cur_trans);

	if (trans->type < TRANS_JOIN_NOLOCK)
		sb_end_intwrite(root->fs_info->sb);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (current != root->fs_info->transaction_kthread)
		btrfs_run_delayed_iputs(root);

	return ret;

cleanup_transaction:
	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;
	btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
//	WARN_ON(1);
	if (current->journal_info == trans)
		current->journal_info = NULL;
	cleanup_transaction(trans, root, ret);

	return ret;
}

/*
 * interface function to delete all the snapshots we have scheduled for deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	list_splice_init(&fs_info->dead_roots, &list);
	spin_unlock(&fs_info->trans_lock);

	while (!list_empty(&list)) {
		int ret;

		root = list_entry(list.next, struct btrfs_root, root_list);
		list_del(&root->root_list);

		btrfs_kill_all_delayed_nodes(root);

		if (btrfs_header_backref_rev(root->node) <
		    BTRFS_MIXED_BACKREF_REV)
			ret = btrfs_drop_snapshot(root, NULL, 0, 0);
		else
			ret = btrfs_drop_snapshot(root, NULL, 1, 0);
		BUG_ON(ret < 0);
	}
	return 0;
}