/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"
#include "dev-replace.h"

#define BTRFS_ROOT_TRANS_TAG 0
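
/*
 * Drop one reference to a transaction.  The last reference frees the
 * struct; by then the transaction must be off the trans_list and have
 * no delayed refs left.
 */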
void put_transaction(struct btrfs_transaction *transaction)
{
        WARN_ON(atomic_read(&transaction->use_count) == 0);
        if (atomic_dec_and_test(&transaction->use_count)) {
                BUG_ON(!list_empty(&transaction->list));
                WARN_ON(transaction->delayed_refs.root.rb_node);
                memset(transaction, 0, sizeof(*transaction));
                kmem_cache_free(btrfs_transaction_cachep, transaction);
        }
}
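
/*
 * Swap in the current root node as the new commit root, dropping our
 * reference to the old commit root.
 */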
static noinline void switch_commit_root(struct btrfs_root *root)
{
        free_extent_buffer(root->commit_root);
        root->commit_root = btrfs_root_node(root);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, int type)
{
        struct btrfs_transaction *cur_trans;
        struct btrfs_fs_info *fs_info = root->fs_info;

        spin_lock(&fs_info->trans_lock);
loop:
        /* The file system has been taken offline. No new transactions. */
        if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
                spin_unlock(&fs_info->trans_lock);
                return -EROFS;
        }

        if (fs_info->trans_no_join) {
                /*
                 * If we are JOIN_NOLOCK we're already committing a current
                 * transaction, we just need a handle to deal with something
                 * when committing the transaction, such as inode cache and
                 * space cache. It is a special case.
                 */
                if (type != TRANS_JOIN_NOLOCK) {
                        spin_unlock(&fs_info->trans_lock);
                        return -EBUSY;
                }
        }

        cur_trans = fs_info->running_transaction;
        if (cur_trans) {
                if (cur_trans->aborted) {
                        spin_unlock(&fs_info->trans_lock);
                        return cur_trans->aborted;
                }
                atomic_inc(&cur_trans->use_count);
                atomic_inc(&cur_trans->num_writers);
                cur_trans->num_joined++;
                spin_unlock(&fs_info->trans_lock);
                return 0;
        }
        spin_unlock(&fs_info->trans_lock);

        /*
         * If we are ATTACH, we just want to catch the current transaction,
         * and commit it. If there is no transaction, just return ENOENT.
         */
        if (type == TRANS_ATTACH)
                return -ENOENT;

        cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
        if (!cur_trans)
                return -ENOMEM;

        spin_lock(&fs_info->trans_lock);
        if (fs_info->running_transaction) {
                /*
                 * someone started a transaction after we unlocked.  Make sure
                 * to redo the trans_no_join checks above
                 */
                kmem_cache_free(btrfs_transaction_cachep, cur_trans);
                cur_trans = fs_info->running_transaction;
                goto loop;
        } else if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
                spin_unlock(&fs_info->trans_lock);
                kmem_cache_free(btrfs_transaction_cachep, cur_trans);
                return -EROFS;
        }

        atomic_set(&cur_trans->num_writers, 1);
        cur_trans->num_joined = 0;
        init_waitqueue_head(&cur_trans->writer_wait);
        init_waitqueue_head(&cur_trans->commit_wait);
        cur_trans->in_commit = 0;
        cur_trans->blocked = 0;
        /*
         * One for this trans handle, one so it will live on until we
         * commit the transaction.
         */
        atomic_set(&cur_trans->use_count, 2);
        cur_trans->commit_done = 0;
        cur_trans->start_time = get_seconds();

        cur_trans->delayed_refs.root = RB_ROOT;
        cur_trans->delayed_refs.num_entries = 0;
        cur_trans->delayed_refs.num_heads_ready = 0;
        cur_trans->delayed_refs.num_heads = 0;
        cur_trans->delayed_refs.flushing = 0;
        cur_trans->delayed_refs.run_delayed_start = 0;

        /*
         * although the tree mod log is per file system and not per transaction,
         * the log must never go across transaction boundaries.
         */
        smp_mb();
        if (!list_empty(&fs_info->tree_mod_seq_list))
                WARN(1, KERN_ERR "btrfs: tree_mod_seq_list not empty when "
                        "creating a fresh transaction\n");
        if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
                WARN(1, KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
                        "creating a fresh transaction\n");
        atomic_set(&fs_info->tree_mod_seq, 0);

        spin_lock_init(&cur_trans->commit_lock);
        spin_lock_init(&cur_trans->delayed_refs.lock);

        INIT_LIST_HEAD(&cur_trans->pending_snapshots);
        list_add_tail(&cur_trans->list, &fs_info->trans_list);
        extent_io_tree_init(&cur_trans->dirty_pages,
                            fs_info->btree_inode->i_mapping);
        fs_info->generation++;
        cur_trans->transid = fs_info->generation;
        fs_info->running_transaction = cur_trans;
        cur_trans->aborted = 0;
        spin_unlock(&fs_info->trans_lock);

        return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root)
{
        if (root->ref_cows && root->last_trans < trans->transid) {
                WARN_ON(root == root->fs_info->extent_root);
                WARN_ON(root->commit_root != root->node);

                /*
                 * see below for in_trans_setup usage rules
                 * we have the reloc mutex held now, so there
                 * is only one writer in this function
                 */
                root->in_trans_setup = 1;

                /* make sure readers find in_trans_setup before
                 * they find our root->last_trans update
                 */
                smp_wmb();

                spin_lock(&root->fs_info->fs_roots_radix_lock);
                if (root->last_trans == trans->transid) {
                        spin_unlock(&root->fs_info->fs_roots_radix_lock);
                        return 0;
                }
                radix_tree_tag_set(&root->fs_info->fs_roots_radix,
                                   (unsigned long)root->root_key.objectid,
                                   BTRFS_ROOT_TRANS_TAG);
                spin_unlock(&root->fs_info->fs_roots_radix_lock);
                root->last_trans = trans->transid;

                /* this is pretty tricky.  We don't want to
                 * take the relocation lock in btrfs_record_root_in_trans
                 * unless we're really doing the first setup for this root in
                 * this transaction.
                 *
                 * Normally we'd use root->last_trans as a flag to decide
                 * if we want to take the expensive mutex.
                 *
                 * But, we have to set root->last_trans before we
                 * init the relocation root, otherwise, we trip over warnings
                 * in ctree.c.  The solution used here is to flag ourselves
                 * with root->in_trans_setup.  When this is 1, we're still
                 * fixing up the reloc trees and everyone must wait.
                 *
                 * When this is zero, they can trust root->last_trans and fly
                 * through btrfs_record_root_in_trans without having to take the
                 * lock.  smp_wmb() makes sure that all the writes above are
                 * done before we pop in the zero below
                 */
                btrfs_init_reloc_root(trans, root);
                smp_wmb();
                root->in_trans_setup = 0;
        }
        return 0;
}
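
/*
 * Record @root in the current transaction if it hasn't been recorded yet,
 * taking the reloc mutex only when the first-time setup is actually needed.
 */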
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        if (!root->ref_cows)
                return 0;

        /*
         * see record_root_in_trans for comments about in_trans_setup usage
         * and barriers
         */
        smp_rmb();
        if (root->last_trans == trans->transid &&
            !root->in_trans_setup)
                return 0;

        mutex_lock(&root->fs_info->reloc_mutex);
        record_root_in_trans(trans, root);
        mutex_unlock(&root->fs_info->reloc_mutex);

        return 0;
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans;

        spin_lock(&root->fs_info->trans_lock);
        cur_trans = root->fs_info->running_transaction;
        if (cur_trans && cur_trans->blocked) {
                atomic_inc(&cur_trans->use_count);
                spin_unlock(&root->fs_info->trans_lock);

                wait_event(root->fs_info->transaction_wait,
                           !cur_trans->blocked);
                put_transaction(cur_trans);
        } else {
                spin_unlock(&root->fs_info->trans_lock);
        }
}
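
/*
 * Return 1 if the caller should wait for a blocked transaction before
 * joining: userspace-started transactions always wait, and so does a plain
 * TRANS_START unless an ioctl-controlled transaction is open.
 */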
static int may_wait_transaction(struct btrfs_root *root, int type)
{
        if (root->fs_info->log_root_recovering)
                return 0;

        if (type == TRANS_USERSPACE)
                return 1;

        if (type == TRANS_START &&
            !atomic_read(&root->fs_info->open_ioctl_trans))
                return 1;

        return 0;
}
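
/*
 * Common helper behind the btrfs_start/join/attach variants below: reserves
 * metadata space for @num_items items (flushing as @flush allows), then
 * joins or creates the running transaction according to @type.
 */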
static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, u64 num_items, int type,
                  enum btrfs_reserve_flush_enum flush)
{
        struct btrfs_trans_handle *h;
        struct btrfs_transaction *cur_trans;
        u64 num_bytes = 0;
        int ret;
        u64 qgroup_reserved = 0;

        if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
                return ERR_PTR(-EROFS);

        if (current->journal_info) {
                WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
                h = current->journal_info;
                h->use_count++;
                WARN_ON(h->use_count > 2);
                h->orig_rsv = h->block_rsv;
                h->block_rsv = NULL;
                goto got_it;
        }

        /*
         * Do the reservation before we join the transaction so we can do all
         * the appropriate flushing if need be.
         */
        if (num_items > 0 && root != root->fs_info->chunk_root) {
                if (root->fs_info->quota_enabled &&
                    is_fstree(root->root_key.objectid)) {
                        qgroup_reserved = num_items * root->leafsize;
                        ret = btrfs_qgroup_reserve(root, qgroup_reserved);
                        if (ret)
                                return ERR_PTR(ret);
                }

                num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
                ret = btrfs_block_rsv_add(root,
                                          &root->fs_info->trans_block_rsv,
                                          num_bytes, flush);
                if (ret)
                        return ERR_PTR(ret);
        }
again:
        h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
        if (!h)
                return ERR_PTR(-ENOMEM);

        /*
         * If we are JOIN_NOLOCK we're already committing a transaction and
         * waiting on this guy, so we don't need to do the sb_start_intwrite
         * because we're already holding a ref.  We need this because we could
         * have raced in and done an fsync() on a file which can kick a commit
         * and then we deadlock with somebody doing a freeze.
         *
         * If we are ATTACH, it means we just want to catch the current
         * transaction and commit it, so we needn't do sb_start_intwrite().
         */
        if (type < TRANS_JOIN_NOLOCK)
                sb_start_intwrite(root->fs_info->sb);

        if (may_wait_transaction(root, type))
                wait_current_trans(root);

        do {
                ret = join_transaction(root, type);
                if (ret == -EBUSY)
                        wait_current_trans(root);
        } while (ret == -EBUSY);

        if (ret < 0) {
                /* We must get the transaction if we are JOIN_NOLOCK. */
                BUG_ON(type == TRANS_JOIN_NOLOCK);

                if (type < TRANS_JOIN_NOLOCK)
                        sb_end_intwrite(root->fs_info->sb);
                kmem_cache_free(btrfs_trans_handle_cachep, h);
                return ERR_PTR(ret);
        }

        cur_trans = root->fs_info->running_transaction;

        h->transid = cur_trans->transid;
        h->transaction = cur_trans;
        h->blocks_used = 0;
        h->bytes_reserved = 0;
        h->root = root;
        h->delayed_ref_updates = 0;
        h->use_count = 1;
        h->adding_csums = 0;
        h->block_rsv = NULL;
        h->orig_rsv = NULL;
        h->aborted = 0;
        h->qgroup_reserved = qgroup_reserved;
        h->delayed_ref_elem.seq = 0;
        h->type = type;
        INIT_LIST_HEAD(&h->qgroup_ref_list);
        INIT_LIST_HEAD(&h->new_bgs);

        smp_mb();
        if (cur_trans->blocked && may_wait_transaction(root, type)) {
                btrfs_commit_transaction(h, root);
                goto again;
        }

        if (num_bytes) {
                trace_btrfs_space_reservation(root->fs_info, "transaction",
                                              h->transid, num_bytes, 1);
                h->block_rsv = &root->fs_info->trans_block_rsv;
                h->bytes_reserved = num_bytes;
        }

got_it:
        btrfs_record_root_in_trans(h, root);

        if (!current->journal_info && type != TRANS_USERSPACE)
                current->journal_info = h;
        return h;
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
                                                   int num_items)
{
        return start_transaction(root, num_items, TRANS_START,
                                 BTRFS_RESERVE_FLUSH_ALL);
}

struct btrfs_trans_handle *btrfs_start_transaction_lflush(
                                        struct btrfs_root *root, int num_items)
{
        return start_transaction(root, num_items, TRANS_START,
                                 BTRFS_RESERVE_FLUSH_LIMIT);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_JOIN, 0);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_USERSPACE, 0);
}

struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_ATTACH, 0);
}

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
                                     struct btrfs_transaction *commit)
{
        wait_event(commit->commit_wait, commit->commit_done);
}
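
/*
 * Wait for the commit of a given transid, or, when transid == 0, for the
 * newest transaction that is already committing.  Returns -EINVAL when the
 * specified transaction cannot be found.
 */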
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
        struct btrfs_transaction *cur_trans = NULL, *t;
        int ret = 0;

        if (transid) {
                if (transid <= root->fs_info->last_trans_committed)
                        goto out;

                ret = -EINVAL;
                /* find specified transaction */
                spin_lock(&root->fs_info->trans_lock);
                list_for_each_entry(t, &root->fs_info->trans_list, list) {
                        if (t->transid == transid) {
                                cur_trans = t;
                                atomic_inc(&cur_trans->use_count);
                                ret = 0;
                                break;
                        }
                        if (t->transid > transid) {
                                ret = 0;
                                break;
                        }
                }
                spin_unlock(&root->fs_info->trans_lock);
                /* The specified transaction doesn't exist */
                if (!cur_trans)
                        goto out;
        } else {
                /* find newest transaction that is committing | committed */
                spin_lock(&root->fs_info->trans_lock);
                list_for_each_entry_reverse(t, &root->fs_info->trans_list,
                                            list) {
                        if (t->in_commit) {
                                if (t->commit_done)
                                        break;
                                cur_trans = t;
                                atomic_inc(&cur_trans->use_count);
                                break;
                        }
                }
                spin_unlock(&root->fs_info->trans_lock);
                if (!cur_trans)
                        goto out;  /* nothing committing|committed */
        }

        wait_for_commit(root, cur_trans);
        put_transaction(cur_trans);
out:
        return ret;
}

void btrfs_throttle(struct btrfs_root *root)
{
        if (!atomic_read(&root->fs_info->open_ioctl_trans))
                wait_current_trans(root);
}
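
/*
 * Return 1 when the global block reserve is running low enough that this
 * transaction should be wrapped up.
 */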
static int should_end_transaction(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root)
{
        int ret;

        ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
        return ret ? 1 : 0;
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans = trans->transaction;
        int updates;
        int err;

        smp_mb();
        if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
                return 1;

        updates = trans->delayed_ref_updates;
        trans->delayed_ref_updates = 0;
        if (updates) {
                err = btrfs_run_delayed_refs(trans, root, updates);
                if (err) /* Error code will also eval true */
                        return err;
        }

        return should_end_transaction(trans, root);
}
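
/*
 * Common exit path for ending a transaction handle: releases the handle's
 * metadata reservation, runs a batch of delayed refs, kicks or joins the
 * commit once the transaction has become blocked, and frees the handle.
 */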
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root, int throttle)
{
        struct btrfs_transaction *cur_trans = trans->transaction;
        struct btrfs_fs_info *info = root->fs_info;
        int count = 0;
        int lock = (trans->type != TRANS_JOIN_NOLOCK);
        int err = 0;

        if (--trans->use_count) {
                trans->block_rsv = trans->orig_rsv;
                return 0;
        }

        /*
         * do the qgroup accounting as early as possible
         */
        err = btrfs_delayed_refs_qgroup_accounting(trans, info);

        btrfs_trans_release_metadata(trans, root);
        trans->block_rsv = NULL;
        /*
         * the same root has to be passed to start_transaction and
         * end_transaction. Subvolume quota depends on this.
         */
        WARN_ON(trans->root != root);

        if (trans->qgroup_reserved) {
                btrfs_qgroup_free(root, trans->qgroup_reserved);
                trans->qgroup_reserved = 0;
        }

        if (!list_empty(&trans->new_bgs))
                btrfs_create_pending_block_groups(trans, root);

        while (count < 2) {
                unsigned long cur = trans->delayed_ref_updates;
                trans->delayed_ref_updates = 0;
                if (cur &&
                    trans->transaction->delayed_refs.num_heads_ready > 64) {
                        trans->delayed_ref_updates = 0;
                        btrfs_run_delayed_refs(trans, root, cur);
                } else {
                        break;
                }
                count++;
        }

        btrfs_trans_release_metadata(trans, root);
        trans->block_rsv = NULL;

        if (!list_empty(&trans->new_bgs))
                btrfs_create_pending_block_groups(trans, root);

        if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
            should_end_transaction(trans, root)) {
                trans->transaction->blocked = 1;
                smp_wmb();
        }

        if (lock && cur_trans->blocked && !cur_trans->in_commit) {
                if (throttle) {
                        /*
                         * We may race with somebody else here so end up having
                         * to call end_transaction on ourselves again, so inc
                         * our use_count.
                         */
                        trans->use_count++;
                        return btrfs_commit_transaction(trans, root);
                } else {
                        wake_up_process(info->transaction_kthread);
                }
        }

        if (trans->type < TRANS_JOIN_NOLOCK)
                sb_end_intwrite(root->fs_info->sb);

        WARN_ON(cur_trans != info->running_transaction);
        WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
        atomic_dec(&cur_trans->num_writers);

        smp_mb();
        if (waitqueue_active(&cur_trans->writer_wait))
                wake_up(&cur_trans->writer_wait);
        put_transaction(cur_trans);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        if (throttle)
                btrfs_run_delayed_iputs(root);

        if (trans->aborted ||
            root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
                err = -EIO;
        }
        assert_qgroups_uptodate(trans);

        memset(trans, 0, sizeof(*trans));
        kmem_cache_free(btrfs_trans_handle_cachep, trans);
        return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root)
{
        int ret;

        ret = __btrfs_end_transaction(trans, root, 0);
        if (ret)
                return ret;
        return 0;
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
{
        int ret;

        ret = __btrfs_end_transaction(trans, root, 1);
        if (ret)
                return ret;
        return 0;
}

int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root)
{
        return __btrfs_end_transaction(trans, root, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
                               struct extent_io_tree *dirty_pages, int mark)
{
        int err = 0;
        int werr = 0;
        struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
        struct extent_state *cached_state = NULL;
        u64 start = 0;
        u64 end;

        while (!find_first_extent_bit(dirty_pages, start, &start, &end,
                                      mark, &cached_state)) {
                convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
                                   mark, &cached_state, GFP_NOFS);
                cached_state = NULL;
                err = filemap_fdatawrite_range(mapping, start, end);
                if (err)
                        werr = err;
                cond_resched();
                start = end + 1;
        }
        if (err)
                werr = err;
        return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
                              struct extent_io_tree *dirty_pages, int mark)
{
        int err = 0;
        int werr = 0;
        struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
        struct extent_state *cached_state = NULL;
        u64 start = 0;
        u64 end;

        while (!find_first_extent_bit(dirty_pages, start, &start, &end,
                                      EXTENT_NEED_WAIT, &cached_state)) {
                clear_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
                                 0, 0, &cached_state, GFP_NOFS);
                err = filemap_fdatawait_range(mapping, start, end);
                if (err)
                        werr = err;
                cond_resched();
                start = end + 1;
        }
        if (err)
                werr = err;
        return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
                                struct extent_io_tree *dirty_pages, int mark)
{
        int ret;
        int ret2;

        ret = btrfs_write_marked_extents(root, dirty_pages, mark);
        ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);

        if (ret)
                return ret;
        if (ret2)
                return ret2;
        return 0;
}
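
/*
 * Write out and wait on every dirty btree block in this transaction.  With
 * no transaction handle, fall back to flushing the whole btree inode.
 */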
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root)
{
        if (!trans || !trans->transaction) {
                struct inode *btree_inode;
                btree_inode = root->fs_info->btree_inode;
                return filemap_write_and_wait(btree_inode->i_mapping);
        }
        return btrfs_write_and_wait_marked_extents(root,
                                           &trans->transaction->dirty_pages,
                                           EXTENT_DIRTY);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        int ret;
        u64 old_root_bytenr;
        u64 old_root_used;
        struct btrfs_root *tree_root = root->fs_info->tree_root;

        old_root_used = btrfs_root_used(&root->root_item);
        btrfs_write_dirty_block_groups(trans, root);

        while (1) {
                old_root_bytenr = btrfs_root_bytenr(&root->root_item);
                if (old_root_bytenr == root->node->start &&
                    old_root_used == btrfs_root_used(&root->root_item))
                        break;

                btrfs_set_root_node(&root->root_item, root->node);
                ret = btrfs_update_root(trans, tree_root,
                                        &root->root_key,
                                        &root->root_item);
                if (ret)
                        return ret;

                old_root_used = btrfs_root_used(&root->root_item);
                ret = btrfs_write_dirty_block_groups(trans, root);
                if (ret)
                        return ret;
        }

        if (root != root->fs_info->extent_root)
                switch_commit_root(root);

        return 0;
}

/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
                                         struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct list_head *next;
        struct extent_buffer *eb;
        int ret;

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        if (ret)
                return ret;

        eb = btrfs_lock_root_node(fs_info->tree_root);
        ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
                              0, &eb);
        btrfs_tree_unlock(eb);
        free_extent_buffer(eb);

        if (ret)
                return ret;

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        if (ret)
                return ret;

        ret = btrfs_run_dev_stats(trans, root->fs_info);
        WARN_ON(ret);
        ret = btrfs_run_dev_replace(trans, root->fs_info);
        WARN_ON(ret);

        ret = btrfs_run_qgroups(trans, root->fs_info);
        BUG_ON(ret);

        /* run_qgroups might have added some more refs */
        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        BUG_ON(ret);

        while (!list_empty(&fs_info->dirty_cowonly_roots)) {
                next = fs_info->dirty_cowonly_roots.next;
                list_del_init(next);
                root = list_entry(next, struct btrfs_root, dirty_list);

                ret = update_cowonly_root(trans, root);
                if (ret)
                        return ret;
        }

        down_write(&fs_info->extent_commit_sem);
        switch_commit_root(fs_info->extent_root);
        up_write(&fs_info->extent_commit_sem);

        btrfs_after_dev_replace_commit(fs_info);

        return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
        spin_lock(&root->fs_info->trans_lock);
        list_add(&root->root_list, &root->fs_info->dead_roots);
        spin_unlock(&root->fs_info->trans_lock);
        return 0;
}

/*
 * write out all the dirty fs-tree (subvolume) roots and record them in the
 * tree of tree roots
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root)
{
        struct btrfs_root *gang[8];
        struct btrfs_fs_info *fs_info = root->fs_info;
        int i;
        int ret;
        int err = 0;

        spin_lock(&fs_info->fs_roots_radix_lock);
        while (1) {
                ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
                                                 (void **)gang, 0,
                                                 ARRAY_SIZE(gang),
                                                 BTRFS_ROOT_TRANS_TAG);
                if (ret == 0)
                        break;
                for (i = 0; i < ret; i++) {
                        root = gang[i];
                        radix_tree_tag_clear(&fs_info->fs_roots_radix,
                                        (unsigned long)root->root_key.objectid,
                                        BTRFS_ROOT_TRANS_TAG);
                        spin_unlock(&fs_info->fs_roots_radix_lock);

                        btrfs_free_log(trans, root);
                        btrfs_update_reloc_root(trans, root);
                        btrfs_orphan_commit_root(trans, root);

                        btrfs_save_ino_cache(root, trans);

                        /* see comments in should_cow_block() */
                        root->force_cow = 0;
                        smp_wmb();

                        if (root->commit_root != root->node) {
                                mutex_lock(&root->fs_commit_mutex);
                                switch_commit_root(root);
                                btrfs_unpin_free_ino(root);
                                mutex_unlock(&root->fs_commit_mutex);

                                btrfs_set_root_node(&root->root_item,
                                                    root->node);
                        }

                        err = btrfs_update_root(trans, fs_info->tree_root,
                                                &root->root_key,
                                                &root->root_item);
                        spin_lock(&fs_info->fs_roots_radix_lock);
                        if (err)
                                break;
                }
        }
        spin_unlock(&fs_info->fs_roots_radix_lock);
        return err;
}

/*
 * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_trans_handle *trans;
        int ret;

        if (xchg(&root->defrag_running, 1))
                return 0;

        while (1) {
                trans = btrfs_start_transaction(root, 0);
                if (IS_ERR(trans))
                        return PTR_ERR(trans);

                ret = btrfs_defrag_leaves(trans, root, cacheonly);

                btrfs_end_transaction(trans, root);
                btrfs_btree_balance_dirty(info->tree_root);
                cond_resched();

                if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
                        break;
        }
        root->defrag_running = 0;
        return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
                                   struct btrfs_fs_info *fs_info,
                                   struct btrfs_pending_snapshot *pending)
{
        struct btrfs_key key;
        struct btrfs_root_item *new_root_item;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *root = pending->root;
        struct btrfs_root *parent_root;
        struct btrfs_block_rsv *rsv;
        struct inode *parent_inode;
        struct btrfs_path *path;
        struct btrfs_dir_item *dir_item;
        struct dentry *parent;
        struct dentry *dentry;
        struct extent_buffer *tmp;
        struct extent_buffer *old;
        struct timespec cur_time = CURRENT_TIME;
        int ret;
        u64 to_reserve = 0;
        u64 index = 0;
        u64 objectid;
        u64 root_flags;
        uuid_le new_uuid;

        path = btrfs_alloc_path();
        if (!path) {
                ret = pending->error = -ENOMEM;
                goto path_alloc_fail;
        }

        new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
        if (!new_root_item) {
                ret = pending->error = -ENOMEM;
                goto root_item_alloc_fail;
        }

        ret = btrfs_find_free_objectid(tree_root, &objectid);
        if (ret) {
                pending->error = ret;
                goto no_free_objectid;
        }

        btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);

        if (to_reserve > 0) {
                ret = btrfs_block_rsv_add(root, &pending->block_rsv,
                                          to_reserve,
                                          BTRFS_RESERVE_NO_FLUSH);
                if (ret) {
                        pending->error = ret;
                        goto no_free_objectid;
                }
        }

        ret = btrfs_qgroup_inherit(trans, fs_info, root->root_key.objectid,
                                   objectid, pending->inherit);
        if (ret) {
                pending->error = ret;
                goto no_free_objectid;
        }

        key.objectid = objectid;
        key.offset = (u64)-1;
        key.type = BTRFS_ROOT_ITEM_KEY;

        rsv = trans->block_rsv;
        trans->block_rsv = &pending->block_rsv;

        dentry = pending->dentry;
        parent = dget_parent(dentry);
        parent_inode = parent->d_inode;
        parent_root = BTRFS_I(parent_inode)->root;
        record_root_in_trans(trans, parent_root);

        /*
         * insert the directory item
         */
        ret = btrfs_set_inode_index(parent_inode, &index);
        BUG_ON(ret); /* -ENOMEM */

        /* check if there is a file/dir which has the same name. */
        dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
                                         btrfs_ino(parent_inode),
                                         dentry->d_name.name,
                                         dentry->d_name.len, 0);
        if (dir_item != NULL && !IS_ERR(dir_item)) {
                pending->error = -EEXIST;
                goto fail;
        } else if (IS_ERR(dir_item)) {
                ret = PTR_ERR(dir_item);
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }
        btrfs_release_path(path);

        /*
         * pull in the delayed directory update
         * and the delayed inode item
         * otherwise we corrupt the FS during
         * snapshot
         */
        ret = btrfs_run_delayed_items(trans, root);
        if (ret) {      /* Transaction aborted */
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        record_root_in_trans(trans, root);
        btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
        memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
        btrfs_check_and_init_root_item(new_root_item);

        root_flags = btrfs_root_flags(new_root_item);
        if (pending->readonly)
                root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
        else
                root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
        btrfs_set_root_flags(new_root_item, root_flags);

        btrfs_set_root_generation_v2(new_root_item,
                        trans->transid);
        uuid_le_gen(&new_uuid);
        memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
        memcpy(new_root_item->parent_uuid, root->root_item.uuid,
                        BTRFS_UUID_SIZE);
        new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec);
        new_root_item->otime.nsec = cpu_to_le32(cur_time.tv_nsec);
        btrfs_set_root_otransid(new_root_item, trans->transid);
        memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
        memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
        btrfs_set_root_stransid(new_root_item, 0);
        btrfs_set_root_rtransid(new_root_item, 0);

        old = btrfs_lock_root_node(root);
        ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
        if (ret) {
                btrfs_tree_unlock(old);
                free_extent_buffer(old);
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        btrfs_set_lock_blocking(old);

        ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
        /* clean up in any case */
        btrfs_tree_unlock(old);
        free_extent_buffer(old);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        /* see comments in should_cow_block() */
        root->force_cow = 1;
        smp_wmb();

        btrfs_set_root_node(new_root_item, tmp);
        /* record when the snapshot was created in key.offset */
        key.offset = trans->transid;
        ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
        btrfs_tree_unlock(tmp);
        free_extent_buffer(tmp);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        /*
         * insert root back/forward references
         */
        ret = btrfs_add_root_ref(trans, tree_root, objectid,
                                 parent_root->root_key.objectid,
                                 btrfs_ino(parent_inode), index,
                                 dentry->d_name.name, dentry->d_name.len);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        key.offset = (u64)-1;
        pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
        if (IS_ERR(pending->snap)) {
                ret = PTR_ERR(pending->snap);
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        ret = btrfs_reloc_post_snapshot(trans, pending);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        ret = btrfs_insert_dir_item(trans, parent_root,
                                    dentry->d_name.name, dentry->d_name.len,
                                    parent_inode, &key,
                                    BTRFS_FT_DIR, index);
        /* We have checked the name at the beginning, so it is impossible. */
        BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        btrfs_i_size_write(parent_inode, parent_inode->i_size +
                                         dentry->d_name.len * 2);
        parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
        ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
        if (ret)
                btrfs_abort_transaction(trans, root, ret);
fail:
        dput(parent);
        trans->block_rsv = rsv;
no_free_objectid:
        kfree(new_root_item);
root_item_alloc_fail:
        btrfs_free_path(path);
path_alloc_fail:
        btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
        return ret;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
                                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_pending_snapshot *pending;
        struct list_head *head = &trans->transaction->pending_snapshots;

        list_for_each_entry(pending, head, list)
                create_pending_snapshot(trans, fs_info, pending);
        return 0;
}
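
/*
 * Copy the current chunk root and tree root pointers into the in-memory
 * superblock so they land on disk with the next superblock write.
 */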
static void update_super_roots(struct btrfs_root *root)
{
        struct btrfs_root_item *root_item;
        struct btrfs_super_block *super;

        super = root->fs_info->super_copy;

        root_item = &root->fs_info->chunk_root->root_item;
        super->chunk_root = root_item->bytenr;
        super->chunk_root_generation = root_item->generation;
        super->chunk_root_level = root_item->level;

        root_item = &root->fs_info->tree_root->root_item;
        super->root = root_item->bytenr;
        super->generation = root_item->generation;
        super->root_level = root_item->level;
        if (btrfs_test_opt(root, SPACE_CACHE))
                super->cache_generation = root_item->generation;
}

int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
        int ret = 0;

        spin_lock(&info->trans_lock);
        if (info->running_transaction)
                ret = info->running_transaction->in_commit;
        spin_unlock(&info->trans_lock);
        return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
        int ret = 0;

        spin_lock(&info->trans_lock);
        if (info->running_transaction)
                ret = info->running_transaction->blocked;
        spin_unlock(&info->trans_lock);
        return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
                                            struct btrfs_transaction *trans)
{
        wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
                                         struct btrfs_transaction *trans)
{
        wait_event(root->fs_info->transaction_wait,
                   trans->commit_done || (trans->in_commit && !trans->blocked));
}

/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
        struct btrfs_trans_handle *newtrans;
        struct btrfs_root *root;
        struct delayed_work work;
};
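
/*
 * Worker for btrfs_commit_transaction_async: adopts the handed-off freeze
 * protection and transaction handle, then runs the commit.
 */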
static void do_async_commit(struct work_struct *work)
{
        struct btrfs_async_commit *ac =
                container_of(work, struct btrfs_async_commit, work.work);

        /*
         * We've got freeze protection passed with the transaction.
         * Tell lockdep about it.
         */
        if (ac->newtrans->type < TRANS_JOIN_NOLOCK)
                rwsem_acquire_read(
                    &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
                    0, 1, _THIS_IP_);

        current->journal_info = ac->newtrans;

        btrfs_commit_transaction(ac->newtrans, ac->root);
        kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   int wait_for_unblock)
{
        struct btrfs_async_commit *ac;
        struct btrfs_transaction *cur_trans;

        ac = kmalloc(sizeof(*ac), GFP_NOFS);
        if (!ac)
                return -ENOMEM;

        INIT_DELAYED_WORK(&ac->work, do_async_commit);
        ac->root = root;
        ac->newtrans = btrfs_join_transaction(root);
        if (IS_ERR(ac->newtrans)) {
                int err = PTR_ERR(ac->newtrans);
                kfree(ac);
                return err;
        }

        /* take transaction reference */
        cur_trans = trans->transaction;
        atomic_inc(&cur_trans->use_count);

        btrfs_end_transaction(trans, root);

        /*
         * Tell lockdep we've released the freeze rwsem, since the
         * async commit thread will be the one to unlock it.
         */
        if (trans->type < TRANS_JOIN_NOLOCK)
                rwsem_release(
                        &root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
                        1, _THIS_IP_);

        schedule_delayed_work(&ac->work, 0);

        /* wait for transaction to start and unblock */
        if (wait_for_unblock)
                wait_current_trans_commit_start_and_unblock(root, cur_trans);
        else
                wait_current_trans_commit_start(root, cur_trans);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        put_transaction(cur_trans);
        return 0;
}
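
/*
 * Error path for a failed commit: abort the handle, unhook the transaction
 * from the fs_info and drop both remaining references to it.
 */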
static void cleanup_transaction(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root, int err)
{
        struct btrfs_transaction *cur_trans = trans->transaction;

        WARN_ON(trans->use_count > 1);

        btrfs_abort_transaction(trans, root, err);

        spin_lock(&root->fs_info->trans_lock);
        list_del_init(&cur_trans->list);
        if (cur_trans == root->fs_info->running_transaction) {
                root->fs_info->running_transaction = NULL;
                root->fs_info->trans_no_join = 0;
        }
        spin_unlock(&root->fs_info->trans_lock);

        btrfs_cleanup_one_transaction(trans->transaction, root);

        put_transaction(cur_trans);
        put_transaction(cur_trans);

        trace_btrfs_transaction_commit(root);
        btrfs_scrub_continue(root);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        kmem_cache_free(btrfs_trans_handle_cachep, trans);
}
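
/*
 * Flush everything that must hit disk before the tree roots are committed:
 * delalloc data and ordered extents when required, plus all delayed items
 * and pending ordered operations.
 */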
static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root)
{
        int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);
        int snap_pending = 0;
        int ret;

        if (!flush_on_commit) {
                spin_lock(&root->fs_info->trans_lock);
                if (!list_empty(&trans->transaction->pending_snapshots))
                        snap_pending = 1;
                spin_unlock(&root->fs_info->trans_lock);
        }

        if (flush_on_commit || snap_pending) {
                btrfs_start_delalloc_inodes(root, 1);
                btrfs_wait_ordered_extents(root, 1);
        }

        ret = btrfs_run_delayed_items(trans, root);
        if (ret)
                return ret;

        /*
         * running the delayed items may have added new refs. account
         * them now so that they hinder processing of more delayed refs
         * as little as possible.
         */
        btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);

        /*
         * rename doesn't use btrfs_join_transaction, so, once we
         * set the transaction to blocked above, we aren't going
         * to get any new ordered operations.  We can safely run
         * it here and know for sure that nothing new will be added
         * to the list
         */
        btrfs_run_ordered_operations(root, 1);

        return 0;
}

/*
 * btrfs_transaction state sequence:
 *
 *    in_commit = 0, blocked = 0  (initial)
 *    in_commit = 1, blocked = 1
 *    blocked = 0
 *    commit_done = 1
 */
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root)
{
        unsigned long joined = 0;
        struct btrfs_transaction *cur_trans = trans->transaction;
        struct btrfs_transaction *prev_trans = NULL;
        DEFINE_WAIT(wait);
        int ret;
        int should_grow = 0;
        unsigned long now = get_seconds();

        ret = btrfs_run_ordered_operations(root, 0);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto cleanup_transaction;
        }

        /* Stop the commit early if ->aborted is set */
        if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
                ret = cur_trans->aborted;
                goto cleanup_transaction;
        }

        /* make a pass through all the delayed refs we have so far
         * any running procs may add more while we are here
         */
        ret = btrfs_run_delayed_refs(trans, root, 0);
        if (ret)
                goto cleanup_transaction;

        btrfs_trans_release_metadata(trans, root);
        trans->block_rsv = NULL;

        cur_trans = trans->transaction;

        /*
         * set the flushing flag so procs in this transaction have to
         * start sending their work down.
         */
        cur_trans->delayed_refs.flushing = 1;

        if (!list_empty(&trans->new_bgs))
                btrfs_create_pending_block_groups(trans, root);

        ret = btrfs_run_delayed_refs(trans, root, 0);
        if (ret)
                goto cleanup_transaction;

        spin_lock(&cur_trans->commit_lock);
        if (cur_trans->in_commit) {
                spin_unlock(&cur_trans->commit_lock);
                atomic_inc(&cur_trans->use_count);
                ret = btrfs_end_transaction(trans, root);

                wait_for_commit(root, cur_trans);

                put_transaction(cur_trans);

                return ret;
        }

        trans->transaction->in_commit = 1;
        trans->transaction->blocked = 1;
        spin_unlock(&cur_trans->commit_lock);
        wake_up(&root->fs_info->transaction_blocked_wait);

        spin_lock(&root->fs_info->trans_lock);
        if (cur_trans->list.prev != &root->fs_info->trans_list) {
                prev_trans = list_entry(cur_trans->list.prev,
                                        struct btrfs_transaction, list);
                if (!prev_trans->commit_done) {
                        atomic_inc(&prev_trans->use_count);
                        spin_unlock(&root->fs_info->trans_lock);

                        wait_for_commit(root, prev_trans);

                        put_transaction(prev_trans);
                } else {
                        spin_unlock(&root->fs_info->trans_lock);
                }
        } else {
                spin_unlock(&root->fs_info->trans_lock);
        }

        if (!btrfs_test_opt(root, SSD) &&
            (now < cur_trans->start_time || now - cur_trans->start_time < 1))
                should_grow = 1;

        do {
                joined = cur_trans->num_joined;

                WARN_ON(cur_trans != trans->transaction);

                ret = btrfs_flush_all_pending_stuffs(trans, root);
                if (ret)
                        goto cleanup_transaction;

                prepare_to_wait(&cur_trans->writer_wait, &wait,
                                TASK_UNINTERRUPTIBLE);

                if (atomic_read(&cur_trans->num_writers) > 1)
                        schedule_timeout(MAX_SCHEDULE_TIMEOUT);
                else if (should_grow)
                        schedule_timeout(1);

                finish_wait(&cur_trans->writer_wait, &wait);
        } while (atomic_read(&cur_trans->num_writers) > 1 ||
                 (should_grow && cur_trans->num_joined != joined));

        ret = btrfs_flush_all_pending_stuffs(trans, root);
        if (ret)
                goto cleanup_transaction;

        /*
         * Ok now we need to make sure to block out any other joins while we
         * commit the transaction.  We could have started a join before setting
         * no_join so make sure to wait for num_writers to == 1 again.
         */
        spin_lock(&root->fs_info->trans_lock);
        root->fs_info->trans_no_join = 1;
        spin_unlock(&root->fs_info->trans_lock);
        wait_event(cur_trans->writer_wait,
                   atomic_read(&cur_trans->num_writers) == 1);

        /* ->aborted might be set after the previous check, so check it */
        if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
                ret = cur_trans->aborted;
                goto cleanup_transaction;
        }
        /*
         * the reloc mutex makes sure that we stop
         * the balancing code from coming in and moving
         * extents around in the middle of the commit
         */
        mutex_lock(&root->fs_info->reloc_mutex);

        /*
         * We needn't worry about the delayed items because we will
         * deal with them in create_pending_snapshot(), which is the
         * core function of the snapshot creation.
         */
        ret = create_pending_snapshots(trans, root->fs_info);
        if (ret) {
                mutex_unlock(&root->fs_info->reloc_mutex);
                goto cleanup_transaction;
        }

        /*
         * We insert the dir indexes of the snapshots and update the inode
         * of the snapshots' parents after the snapshot creation, so there
         * are some delayed items which are not dealt with.  Now deal with
         * them.
         *
         * We needn't worry that this operation will corrupt the snapshots,
         * because all the trees which are snapshotted will be forced to COW
         * the nodes and leaves.
         */
        ret = btrfs_run_delayed_items(trans, root);
        if (ret) {
                mutex_unlock(&root->fs_info->reloc_mutex);
                goto cleanup_transaction;
        }

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        if (ret) {
                mutex_unlock(&root->fs_info->reloc_mutex);
                goto cleanup_transaction;
        }

        /*
         * make sure none of the code above managed to slip in a
         * delayed item
         */
        btrfs_assert_delayed_root_empty(root);

        WARN_ON(cur_trans != trans->transaction);

        btrfs_scrub_pause(root);
        /* btrfs_commit_tree_roots is responsible for getting the
         * various roots consistent with each other.  Every pointer
         * in the tree of tree roots has to point to the most up to date
         * root for every subvolume and other tree.  So, we have to keep
         * the tree logging code from jumping in and changing any
         * of the trees.
         *
         * At this point in the commit, there can't be any tree-log
         * writers, but a little lower down we drop the trans mutex
         * and let new people in.  By holding the tree_log_mutex
         * from now until after the super is written, we avoid races
         * with the tree-log code.
         */
        mutex_lock(&root->fs_info->tree_log_mutex);

        ret = commit_fs_roots(trans, root);
        if (ret) {
                mutex_unlock(&root->fs_info->tree_log_mutex);
                mutex_unlock(&root->fs_info->reloc_mutex);
                goto cleanup_transaction;
        }

        /* commit_fs_roots gets rid of all the tree log roots, it is now
         * safe to free the root of tree log roots
         */
        btrfs_free_log_root_tree(trans, root->fs_info);

        ret = commit_cowonly_roots(trans, root);
        if (ret) {
                mutex_unlock(&root->fs_info->tree_log_mutex);
                mutex_unlock(&root->fs_info->reloc_mutex);
                goto cleanup_transaction;
        }

        /*
         * The tasks which save the space cache and inode cache may also
         * update ->aborted, check it.
         */
        if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
                ret = cur_trans->aborted;
                mutex_unlock(&root->fs_info->tree_log_mutex);
                mutex_unlock(&root->fs_info->reloc_mutex);
                goto cleanup_transaction;
        }

        btrfs_prepare_extent_commit(trans, root);

        cur_trans = root->fs_info->running_transaction;

        btrfs_set_root_node(&root->fs_info->tree_root->root_item,
                            root->fs_info->tree_root->node);
        switch_commit_root(root->fs_info->tree_root);

        btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
                            root->fs_info->chunk_root->node);
        switch_commit_root(root->fs_info->chunk_root);

        assert_qgroups_uptodate(trans);
        update_super_roots(root);

        if (!root->fs_info->log_root_recovering) {
                btrfs_set_super_log_root(root->fs_info->super_copy, 0);
                btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
        }

        memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
               sizeof(*root->fs_info->super_copy));

        trans->transaction->blocked = 0;
        spin_lock(&root->fs_info->trans_lock);
        root->fs_info->running_transaction = NULL;
        root->fs_info->trans_no_join = 0;
        spin_unlock(&root->fs_info->trans_lock);
        mutex_unlock(&root->fs_info->reloc_mutex);

        wake_up(&root->fs_info->transaction_wait);

        ret = btrfs_write_and_wait_transaction(trans, root);
        if (ret) {
                btrfs_error(root->fs_info, ret,
                            "Error while writing out transaction.");
                mutex_unlock(&root->fs_info->tree_log_mutex);
                goto cleanup_transaction;
        }

        ret = write_ctree_super(trans, root, 0);
        if (ret) {
                mutex_unlock(&root->fs_info->tree_log_mutex);
                goto cleanup_transaction;
        }

        /*
         * the super is written, we can safely allow the tree-loggers
         * to go about their business
         */
        mutex_unlock(&root->fs_info->tree_log_mutex);

        btrfs_finish_extent_commit(trans, root);

        cur_trans->commit_done = 1;

        root->fs_info->last_trans_committed = cur_trans->transid;

        wake_up(&cur_trans->commit_wait);

        spin_lock(&root->fs_info->trans_lock);
        list_del_init(&cur_trans->list);
        spin_unlock(&root->fs_info->trans_lock);

        put_transaction(cur_trans);
        put_transaction(cur_trans);

        if (trans->type < TRANS_JOIN_NOLOCK)
                sb_end_intwrite(root->fs_info->sb);

        trace_btrfs_transaction_commit(root);

        btrfs_scrub_continue(root);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        kmem_cache_free(btrfs_trans_handle_cachep, trans);

        if (current != root->fs_info->transaction_kthread)
                btrfs_run_delayed_iputs(root);

        return ret;

cleanup_transaction:
        btrfs_trans_release_metadata(trans, root);
        trans->block_rsv = NULL;
        btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
//      WARN_ON(1);
        if (current->journal_info == trans)
                current->journal_info = NULL;
        cleanup_transaction(trans, root, ret);

        return ret;
}

/*
 * interface function to delete all the snapshots we have scheduled for deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
        LIST_HEAD(list);
        struct btrfs_fs_info *fs_info = root->fs_info;

        spin_lock(&fs_info->trans_lock);
        list_splice_init(&fs_info->dead_roots, &list);
        spin_unlock(&fs_info->trans_lock);

        while (!list_empty(&list)) {
                int ret;

                root = list_entry(list.next, struct btrfs_root, root_list);
                list_del(&root->root_list);

                btrfs_kill_all_delayed_nodes(root);

                if (btrfs_header_backref_rev(root->node) <
                    BTRFS_MIXED_BACKREF_REV)
                        ret = btrfs_drop_snapshot(root, NULL, 0, 0);
                else
                        ret = btrfs_drop_snapshot(root, NULL, 1, 0);
                BUG_ON(ret < 0);
        }
        return 0;
}