transaction.c

  1. /*
  2. * Copyright (C) 2007 Oracle. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public
  6. * License v2 as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11. * General Public License for more details.
  12. *
  13. * You should have received a copy of the GNU General Public
  14. * License along with this program; if not, write to the
  15. * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  16. * Boston, MA 021110-1307, USA.
  17. */
  18. #include <linux/fs.h>
  19. #include <linux/slab.h>
  20. #include <linux/sched.h>
  21. #include <linux/writeback.h>
  22. #include <linux/pagemap.h>
  23. #include <linux/blkdev.h>
  24. #include <linux/uuid.h>
  25. #include "ctree.h"
  26. #include "disk-io.h"
  27. #include "transaction.h"
  28. #include "locking.h"
  29. #include "tree-log.h"
  30. #include "inode-map.h"
  31. #include "volumes.h"
  32. #include "dev-replace.h"
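/*
 * Radix tree tag used in fs_info->fs_roots_radix to mark roots that have
 * been recorded in the running transaction (see record_root_in_trans() and
 * commit_fs_roots() below).
 */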
  33. #define BTRFS_ROOT_TRANS_TAG 0
  34. void put_transaction(struct btrfs_transaction *transaction)
  35. {
  36. WARN_ON(atomic_read(&transaction->use_count) == 0);
  37. if (atomic_dec_and_test(&transaction->use_count)) {
  38. BUG_ON(!list_empty(&transaction->list));
  39. WARN_ON(transaction->delayed_refs.root.rb_node);
  40. kmem_cache_free(btrfs_transaction_cachep, transaction);
  41. }
  42. }
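/*
 * Drop the reference to the old commit root and make the current root node
 * the new commit root for this tree.
 */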
  43. static noinline void switch_commit_root(struct btrfs_root *root)
  44. {
  45. free_extent_buffer(root->commit_root);
  46. root->commit_root = btrfs_root_node(root);
  47. }
  48. /*
  49. * either allocate a new transaction or hop into the existing one
  50. */
  51. static noinline int join_transaction(struct btrfs_root *root, int type)
  52. {
  53. struct btrfs_transaction *cur_trans;
  54. struct btrfs_fs_info *fs_info = root->fs_info;
  55. spin_lock(&fs_info->trans_lock);
  56. loop:
  57. /* The file system has been taken offline. No new transactions. */
  58. if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
  59. spin_unlock(&fs_info->trans_lock);
  60. return -EROFS;
  61. }
  62. if (fs_info->trans_no_join) {
  63. /*
  64. * If we are JOIN_NOLOCK we're already committing a current
  65. * transaction, we just need a handle to deal with something
  66. * when committing the transaction, such as inode cache and
  67. * space cache. It is a special case.
  68. */
  69. if (type != TRANS_JOIN_NOLOCK) {
  70. spin_unlock(&fs_info->trans_lock);
  71. return -EBUSY;
  72. }
  73. }
  74. cur_trans = fs_info->running_transaction;
  75. if (cur_trans) {
  76. if (cur_trans->aborted) {
  77. spin_unlock(&fs_info->trans_lock);
  78. return cur_trans->aborted;
  79. }
  80. atomic_inc(&cur_trans->use_count);
  81. atomic_inc(&cur_trans->num_writers);
  82. cur_trans->num_joined++;
  83. spin_unlock(&fs_info->trans_lock);
  84. return 0;
  85. }
  86. spin_unlock(&fs_info->trans_lock);
  87. /*
  88. * If we are ATTACH, we just want to catch the current transaction,
  89. * and commit it. If there is no transaction, just return ENOENT.
  90. */
  91. if (type == TRANS_ATTACH)
  92. return -ENOENT;
  93. cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
  94. if (!cur_trans)
  95. return -ENOMEM;
  96. spin_lock(&fs_info->trans_lock);
  97. if (fs_info->running_transaction) {
  98. /*
  99. * someone started a transaction after we unlocked. Make sure
  100. * to redo the trans_no_join checks above
  101. */
  102. kmem_cache_free(btrfs_transaction_cachep, cur_trans);
  103. cur_trans = fs_info->running_transaction;
  104. goto loop;
  105. } else if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
  106. spin_unlock(&fs_info->trans_lock);
  107. kmem_cache_free(btrfs_transaction_cachep, cur_trans);
  108. return -EROFS;
  109. }
  110. atomic_set(&cur_trans->num_writers, 1);
  111. cur_trans->num_joined = 0;
  112. init_waitqueue_head(&cur_trans->writer_wait);
  113. init_waitqueue_head(&cur_trans->commit_wait);
  114. cur_trans->in_commit = 0;
  115. cur_trans->blocked = 0;
  116. /*
  117. * One for this trans handle, one so it will live on until we
  118. * commit the transaction.
  119. */
  120. atomic_set(&cur_trans->use_count, 2);
  121. cur_trans->commit_done = 0;
  122. cur_trans->start_time = get_seconds();
  123. cur_trans->delayed_refs.root = RB_ROOT;
  124. cur_trans->delayed_refs.num_entries = 0;
  125. cur_trans->delayed_refs.num_heads_ready = 0;
  126. cur_trans->delayed_refs.num_heads = 0;
  127. cur_trans->delayed_refs.flushing = 0;
  128. cur_trans->delayed_refs.run_delayed_start = 0;
  129. /*
  130. * although the tree mod log is per file system and not per transaction,
  131. * the log must never go across transaction boundaries.
  132. */
  133. smp_mb();
  134. if (!list_empty(&fs_info->tree_mod_seq_list))
  135. WARN(1, KERN_ERR "btrfs: tree_mod_seq_list not empty when "
  136. "creating a fresh transaction\n");
  137. if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
  138. WARN(1, KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
  139. "creating a fresh transaction\n");
  140. atomic_set(&fs_info->tree_mod_seq, 0);
  141. spin_lock_init(&cur_trans->commit_lock);
  142. spin_lock_init(&cur_trans->delayed_refs.lock);
  143. INIT_LIST_HEAD(&cur_trans->pending_snapshots);
  144. list_add_tail(&cur_trans->list, &fs_info->trans_list);
  145. extent_io_tree_init(&cur_trans->dirty_pages,
  146. fs_info->btree_inode->i_mapping);
  147. fs_info->generation++;
  148. cur_trans->transid = fs_info->generation;
  149. fs_info->running_transaction = cur_trans;
  150. cur_trans->aborted = 0;
  151. spin_unlock(&fs_info->trans_lock);
  152. return 0;
  153. }
  154. /*
  155. * this does all the record keeping required to make sure that a reference
  156. * counted root is properly recorded in a given transaction. This is required
  157. * to make sure the old root from before we joined the transaction is deleted
  158. * when the transaction commits
  159. */
  160. static int record_root_in_trans(struct btrfs_trans_handle *trans,
  161. struct btrfs_root *root)
  162. {
  163. if (root->ref_cows && root->last_trans < trans->transid) {
  164. WARN_ON(root == root->fs_info->extent_root);
  165. WARN_ON(root->commit_root != root->node);
  166. /*
  167. * see below for in_trans_setup usage rules
  168. * we have the reloc mutex held now, so there
  169. * is only one writer in this function
  170. */
  171. root->in_trans_setup = 1;
  172. /* make sure readers find in_trans_setup before
  173. * they find our root->last_trans update
  174. */
  175. smp_wmb();
  176. spin_lock(&root->fs_info->fs_roots_radix_lock);
  177. if (root->last_trans == trans->transid) {
  178. spin_unlock(&root->fs_info->fs_roots_radix_lock);
  179. return 0;
  180. }
  181. radix_tree_tag_set(&root->fs_info->fs_roots_radix,
  182. (unsigned long)root->root_key.objectid,
  183. BTRFS_ROOT_TRANS_TAG);
  184. spin_unlock(&root->fs_info->fs_roots_radix_lock);
  185. root->last_trans = trans->transid;
  186. /* this is pretty tricky. We don't want to
  187. * take the relocation lock in btrfs_record_root_in_trans
  188. * unless we're really doing the first setup for this root in
  189. * this transaction.
  190. *
  191. * Normally we'd use root->last_trans as a flag to decide
  192. * if we want to take the expensive mutex.
  193. *
  194. * But, we have to set root->last_trans before we
  195. * init the relocation root, otherwise, we trip over warnings
  196. * in ctree.c. The solution used here is to flag ourselves
  197. * with root->in_trans_setup. When this is 1, we're still
  198. * fixing up the reloc trees and everyone must wait.
  199. *
  200. * When this is zero, they can trust root->last_trans and fly
  201. * through btrfs_record_root_in_trans without having to take the
  202. * lock. smp_wmb() makes sure that all the writes above are
  203. * done before we pop in the zero below
  204. */
  205. btrfs_init_reloc_root(trans, root);
  206. smp_wmb();
  207. root->in_trans_setup = 0;
  208. }
  209. return 0;
  210. }
  211. int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
  212. struct btrfs_root *root)
  213. {
  214. if (!root->ref_cows)
  215. return 0;
  216. /*
  217. * see record_root_in_trans for comments about in_trans_setup usage
  218. * and barriers
  219. */
  220. smp_rmb();
  221. if (root->last_trans == trans->transid &&
  222. !root->in_trans_setup)
  223. return 0;
  224. mutex_lock(&root->fs_info->reloc_mutex);
  225. record_root_in_trans(trans, root);
  226. mutex_unlock(&root->fs_info->reloc_mutex);
  227. return 0;
  228. }
  229. /* wait for commit against the current transaction to become unblocked
  230. * when this is done, it is safe to start a new transaction, but the current
  231. * transaction might not be fully on disk.
  232. */
  233. static void wait_current_trans(struct btrfs_root *root)
  234. {
  235. struct btrfs_transaction *cur_trans;
  236. spin_lock(&root->fs_info->trans_lock);
  237. cur_trans = root->fs_info->running_transaction;
  238. if (cur_trans && cur_trans->blocked) {
  239. atomic_inc(&cur_trans->use_count);
  240. spin_unlock(&root->fs_info->trans_lock);
  241. wait_event(root->fs_info->transaction_wait,
  242. !cur_trans->blocked);
  243. put_transaction(cur_trans);
  244. } else {
  245. spin_unlock(&root->fs_info->trans_lock);
  246. }
  247. }
  248. static int may_wait_transaction(struct btrfs_root *root, int type)
  249. {
  250. if (root->fs_info->log_root_recovering)
  251. return 0;
  252. if (type == TRANS_USERSPACE)
  253. return 1;
  254. if (type == TRANS_START &&
  255. !atomic_read(&root->fs_info->open_ioctl_trans))
  256. return 1;
  257. return 0;
  258. }
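/*
 * Reserve metadata space (and qgroup space when quotas are enabled) for
 * num_items items, then join or create a transaction of the given type and
 * return a handle for it.
 */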
  259. static struct btrfs_trans_handle *
  260. start_transaction(struct btrfs_root *root, u64 num_items, int type,
  261. enum btrfs_reserve_flush_enum flush)
  262. {
  263. struct btrfs_trans_handle *h;
  264. struct btrfs_transaction *cur_trans;
  265. u64 num_bytes = 0;
  266. int ret;
  267. u64 qgroup_reserved = 0;
  268. if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
  269. return ERR_PTR(-EROFS);
  270. if (current->journal_info) {
  271. WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
  272. h = current->journal_info;
  273. h->use_count++;
  274. WARN_ON(h->use_count > 2);
  275. h->orig_rsv = h->block_rsv;
  276. h->block_rsv = NULL;
  277. goto got_it;
  278. }
  279. /*
  280. * Do the reservation before we join the transaction so we can do all
  281. * the appropriate flushing if need be.
  282. */
  283. if (num_items > 0 && root != root->fs_info->chunk_root) {
  284. if (root->fs_info->quota_enabled &&
  285. is_fstree(root->root_key.objectid)) {
  286. qgroup_reserved = num_items * root->leafsize;
  287. ret = btrfs_qgroup_reserve(root, qgroup_reserved);
  288. if (ret)
  289. return ERR_PTR(ret);
  290. }
  291. num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
  292. ret = btrfs_block_rsv_add(root,
  293. &root->fs_info->trans_block_rsv,
  294. num_bytes, flush);
  295. if (ret)
  296. goto reserve_fail;
  297. }
  298. again:
  299. h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
  300. if (!h) {
  301. ret = -ENOMEM;
  302. goto alloc_fail;
  303. }
  304. /*
  305. * If we are JOIN_NOLOCK we're already committing a transaction and
  306. * waiting on this guy, so we don't need to do the sb_start_intwrite
  307. * because we're already holding a ref. We need this because we could
  308. * have raced in and done an fsync() on a file which can kick a commit
  309. * and then we deadlock with somebody doing a freeze.
  310. *
  311. * If we are ATTACH, it means we just want to catch the current
  312. * transaction and commit it, so we needn't do sb_start_intwrite().
  313. */
  314. if (type < TRANS_JOIN_NOLOCK)
  315. sb_start_intwrite(root->fs_info->sb);
  316. if (may_wait_transaction(root, type))
  317. wait_current_trans(root);
  318. do {
  319. ret = join_transaction(root, type);
  320. if (ret == -EBUSY)
  321. wait_current_trans(root);
  322. } while (ret == -EBUSY);
  323. if (ret < 0) {
  324. /* We must get the transaction if we are JOIN_NOLOCK. */
  325. BUG_ON(type == TRANS_JOIN_NOLOCK);
  326. goto join_fail;
  327. }
  328. cur_trans = root->fs_info->running_transaction;
  329. h->transid = cur_trans->transid;
  330. h->transaction = cur_trans;
  331. h->blocks_used = 0;
  332. h->bytes_reserved = 0;
  333. h->root = root;
  334. h->delayed_ref_updates = 0;
  335. h->use_count = 1;
  336. h->adding_csums = 0;
  337. h->block_rsv = NULL;
  338. h->orig_rsv = NULL;
  339. h->aborted = 0;
  340. h->qgroup_reserved = qgroup_reserved;
  341. h->delayed_ref_elem.seq = 0;
  342. h->type = type;
  343. h->allocating_chunk = false;
  344. INIT_LIST_HEAD(&h->qgroup_ref_list);
  345. INIT_LIST_HEAD(&h->new_bgs);
  346. smp_mb();
  347. if (cur_trans->blocked && may_wait_transaction(root, type)) {
  348. btrfs_commit_transaction(h, root);
  349. goto again;
  350. }
  351. if (num_bytes) {
  352. trace_btrfs_space_reservation(root->fs_info, "transaction",
  353. h->transid, num_bytes, 1);
  354. h->block_rsv = &root->fs_info->trans_block_rsv;
  355. h->bytes_reserved = num_bytes;
  356. }
  357. got_it:
  358. btrfs_record_root_in_trans(h, root);
  359. if (!current->journal_info && type != TRANS_USERSPACE)
  360. current->journal_info = h;
  361. return h;
  362. join_fail:
  363. if (type < TRANS_JOIN_NOLOCK)
  364. sb_end_intwrite(root->fs_info->sb);
  365. kmem_cache_free(btrfs_trans_handle_cachep, h);
  366. alloc_fail:
  367. if (num_bytes)
  368. btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
  369. num_bytes);
  370. reserve_fail:
  371. if (qgroup_reserved)
  372. btrfs_qgroup_free(root, qgroup_reserved);
  373. return ERR_PTR(ret);
  374. }
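/*
 * The wrappers below only differ in the transaction type and flush
 * behaviour they pass down to start_transaction().
 */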
  375. struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
  376. int num_items)
  377. {
  378. return start_transaction(root, num_items, TRANS_START,
  379. BTRFS_RESERVE_FLUSH_ALL);
  380. }
  381. struct btrfs_trans_handle *btrfs_start_transaction_lflush(
  382. struct btrfs_root *root, int num_items)
  383. {
  384. return start_transaction(root, num_items, TRANS_START,
  385. BTRFS_RESERVE_FLUSH_LIMIT);
  386. }
  387. struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
  388. {
  389. return start_transaction(root, 0, TRANS_JOIN, 0);
  390. }
  391. struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
  392. {
  393. return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
  394. }
  395. struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
  396. {
  397. return start_transaction(root, 0, TRANS_USERSPACE, 0);
  398. }
  399. struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
  400. {
  401. return start_transaction(root, 0, TRANS_ATTACH, 0);
  402. }
  403. /* wait for a transaction commit to be fully complete */
  404. static noinline void wait_for_commit(struct btrfs_root *root,
  405. struct btrfs_transaction *commit)
  406. {
  407. wait_event(commit->commit_wait, commit->commit_done);
  408. }
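/*
 * Wait for the commit of a specific transid, or, when transid is zero, for
 * the newest transaction that is committing or has committed.
 */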
  409. int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
  410. {
  411. struct btrfs_transaction *cur_trans = NULL, *t;
  412. int ret = 0;
  413. if (transid) {
  414. if (transid <= root->fs_info->last_trans_committed)
  415. goto out;
  416. ret = -EINVAL;
  417. /* find specified transaction */
  418. spin_lock(&root->fs_info->trans_lock);
  419. list_for_each_entry(t, &root->fs_info->trans_list, list) {
  420. if (t->transid == transid) {
  421. cur_trans = t;
  422. atomic_inc(&cur_trans->use_count);
  423. ret = 0;
  424. break;
  425. }
  426. if (t->transid > transid) {
  427. ret = 0;
  428. break;
  429. }
  430. }
  431. spin_unlock(&root->fs_info->trans_lock);
  432. /* The specified transaction doesn't exist */
  433. if (!cur_trans)
  434. goto out;
  435. } else {
  436. /* find newest transaction that is committing | committed */
  437. spin_lock(&root->fs_info->trans_lock);
  438. list_for_each_entry_reverse(t, &root->fs_info->trans_list,
  439. list) {
  440. if (t->in_commit) {
  441. if (t->commit_done)
  442. break;
  443. cur_trans = t;
  444. atomic_inc(&cur_trans->use_count);
  445. break;
  446. }
  447. }
  448. spin_unlock(&root->fs_info->trans_lock);
  449. if (!cur_trans)
  450. goto out; /* nothing committing|committed */
  451. }
  452. wait_for_commit(root, cur_trans);
  453. put_transaction(cur_trans);
  454. out:
  455. return ret;
  456. }
  457. void btrfs_throttle(struct btrfs_root *root)
  458. {
  459. if (!atomic_read(&root->fs_info->open_ioctl_trans))
  460. wait_current_trans(root);
  461. }
  462. static int should_end_transaction(struct btrfs_trans_handle *trans,
  463. struct btrfs_root *root)
  464. {
  465. int ret;
  466. ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
  467. return ret ? 1 : 0;
  468. }
  469. int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
  470. struct btrfs_root *root)
  471. {
  472. struct btrfs_transaction *cur_trans = trans->transaction;
  473. int updates;
  474. int err;
  475. smp_mb();
  476. if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
  477. return 1;
  478. updates = trans->delayed_ref_updates;
  479. trans->delayed_ref_updates = 0;
  480. if (updates) {
  481. err = btrfs_run_delayed_refs(trans, root, updates);
  482. if (err) /* a negative error code also evaluates as true here */
  483. return err;
  484. }
  485. return should_end_transaction(trans, root);
  486. }
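/*
 * Common tail for ending a transaction handle: release its metadata and
 * qgroup reservations, run a limited batch of delayed refs, drop the
 * writer count and, when throttling, possibly commit the transaction.
 */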
  487. static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
  488. struct btrfs_root *root, int throttle)
  489. {
  490. struct btrfs_transaction *cur_trans = trans->transaction;
  491. struct btrfs_fs_info *info = root->fs_info;
  492. int count = 0;
  493. int lock = (trans->type != TRANS_JOIN_NOLOCK);
  494. int err = 0;
  495. if (--trans->use_count) {
  496. trans->block_rsv = trans->orig_rsv;
  497. return 0;
  498. }
  499. /*
  500. * do the qgroup accounting as early as possible
  501. */
  502. err = btrfs_delayed_refs_qgroup_accounting(trans, info);
  503. btrfs_trans_release_metadata(trans, root);
  504. trans->block_rsv = NULL;
  505. /*
  506. * the same root has to be passed to start_transaction and
  507. * end_transaction. Subvolume quota depends on this.
  508. */
  509. WARN_ON(trans->root != root);
  510. if (trans->qgroup_reserved) {
  511. btrfs_qgroup_free(root, trans->qgroup_reserved);
  512. trans->qgroup_reserved = 0;
  513. }
  514. if (!list_empty(&trans->new_bgs))
  515. btrfs_create_pending_block_groups(trans, root);
  516. while (count < 2) {
  517. unsigned long cur = trans->delayed_ref_updates;
  518. trans->delayed_ref_updates = 0;
  519. if (cur &&
  520. trans->transaction->delayed_refs.num_heads_ready > 64) {
  521. trans->delayed_ref_updates = 0;
  522. btrfs_run_delayed_refs(trans, root, cur);
  523. } else {
  524. break;
  525. }
  526. count++;
  527. }
  528. btrfs_trans_release_metadata(trans, root);
  529. trans->block_rsv = NULL;
  530. if (!list_empty(&trans->new_bgs))
  531. btrfs_create_pending_block_groups(trans, root);
  532. if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
  533. should_end_transaction(trans, root)) {
  534. trans->transaction->blocked = 1;
  535. smp_wmb();
  536. }
  537. if (lock && cur_trans->blocked && !cur_trans->in_commit) {
  538. if (throttle) {
  539. /*
  540. * We may race with somebody else here and end up having
  541. * to call end_transaction on ourselves again, so bump
  542. * our use_count.
  543. */
  544. trans->use_count++;
  545. return btrfs_commit_transaction(trans, root);
  546. } else {
  547. wake_up_process(info->transaction_kthread);
  548. }
  549. }
  550. if (trans->type < TRANS_JOIN_NOLOCK)
  551. sb_end_intwrite(root->fs_info->sb);
  552. WARN_ON(cur_trans != info->running_transaction);
  553. WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
  554. atomic_dec(&cur_trans->num_writers);
  555. smp_mb();
  556. if (waitqueue_active(&cur_trans->writer_wait))
  557. wake_up(&cur_trans->writer_wait);
  558. put_transaction(cur_trans);
  559. if (current->journal_info == trans)
  560. current->journal_info = NULL;
  561. if (throttle)
  562. btrfs_run_delayed_iputs(root);
  563. if (trans->aborted ||
  564. root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
  565. err = -EIO;
  566. }
  567. assert_qgroups_uptodate(trans);
  568. kmem_cache_free(btrfs_trans_handle_cachep, trans);
  569. return err;
  570. }
  571. int btrfs_end_transaction(struct btrfs_trans_handle *trans,
  572. struct btrfs_root *root)
  573. {
  574. int ret;
  575. ret = __btrfs_end_transaction(trans, root, 0);
  576. if (ret)
  577. return ret;
  578. return 0;
  579. }
  580. int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
  581. struct btrfs_root *root)
  582. {
  583. int ret;
  584. ret = __btrfs_end_transaction(trans, root, 1);
  585. if (ret)
  586. return ret;
  587. return 0;
  588. }
  589. int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
  590. struct btrfs_root *root)
  591. {
  592. return __btrfs_end_transaction(trans, root, 1);
  593. }
  594. /*
  595. * when btree blocks are allocated, they have some corresponding bits set for
  596. * them in one of two extent_io trees. This is used to make sure all of
  597. * those extents are sent to disk but does not wait on them
  598. */
  599. int btrfs_write_marked_extents(struct btrfs_root *root,
  600. struct extent_io_tree *dirty_pages, int mark)
  601. {
  602. int err = 0;
  603. int werr = 0;
  604. struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
  605. struct extent_state *cached_state = NULL;
  606. u64 start = 0;
  607. u64 end;
  608. while (!find_first_extent_bit(dirty_pages, start, &start, &end,
  609. mark, &cached_state)) {
  610. convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
  611. mark, &cached_state, GFP_NOFS);
  612. cached_state = NULL;
  613. err = filemap_fdatawrite_range(mapping, start, end);
  614. if (err)
  615. werr = err;
  616. cond_resched();
  617. start = end + 1;
  618. }
  619. if (err)
  620. werr = err;
  621. return werr;
  622. }
  623. /*
  624. * when btree blocks are allocated, they have some corresponding bits set for
  625. * them in one of two extent_io trees. This is used to make sure all of
  626. * those extents are on disk for transaction or log commit. We wait
  627. * on all the pages and clear them from the dirty pages state tree
  628. */
  629. int btrfs_wait_marked_extents(struct btrfs_root *root,
  630. struct extent_io_tree *dirty_pages, int mark)
  631. {
  632. int err = 0;
  633. int werr = 0;
  634. struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
  635. struct extent_state *cached_state = NULL;
  636. u64 start = 0;
  637. u64 end;
  638. while (!find_first_extent_bit(dirty_pages, start, &start, &end,
  639. EXTENT_NEED_WAIT, &cached_state)) {
  640. clear_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
  641. 0, 0, &cached_state, GFP_NOFS);
  642. err = filemap_fdatawait_range(mapping, start, end);
  643. if (err)
  644. werr = err;
  645. cond_resched();
  646. start = end + 1;
  647. }
  648. if (err)
  649. werr = err;
  650. return werr;
  651. }
  652. /*
  653. * when btree blocks are allocated, they have some corresponding bits set for
  654. * them in one of two extent_io trees. This is used to make sure all of
  655. * those extents are on disk for transaction or log commit
  656. */
  657. int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
  658. struct extent_io_tree *dirty_pages, int mark)
  659. {
  660. int ret;
  661. int ret2;
  662. ret = btrfs_write_marked_extents(root, dirty_pages, mark);
  663. ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
  664. if (ret)
  665. return ret;
  666. if (ret2)
  667. return ret2;
  668. return 0;
  669. }
  670. int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
  671. struct btrfs_root *root)
  672. {
  673. if (!trans || !trans->transaction) {
  674. struct inode *btree_inode;
  675. btree_inode = root->fs_info->btree_inode;
  676. return filemap_write_and_wait(btree_inode->i_mapping);
  677. }
  678. return btrfs_write_and_wait_marked_extents(root,
  679. &trans->transaction->dirty_pages,
  680. EXTENT_DIRTY);
  681. }
  682. /*
  683. * this is used to update the root pointer in the tree of tree roots.
  684. *
  685. * But, in the case of the extent allocation tree, updating the root
  686. * pointer may allocate blocks which may change the root of the extent
  687. * allocation tree.
  688. *
  689. * So, this loops and repeats and makes sure the cowonly root didn't
  690. * change while the root pointer was being updated in the metadata.
  691. */
  692. static int update_cowonly_root(struct btrfs_trans_handle *trans,
  693. struct btrfs_root *root)
  694. {
  695. int ret;
  696. u64 old_root_bytenr;
  697. u64 old_root_used;
  698. struct btrfs_root *tree_root = root->fs_info->tree_root;
  699. old_root_used = btrfs_root_used(&root->root_item);
  700. btrfs_write_dirty_block_groups(trans, root);
  701. while (1) {
  702. old_root_bytenr = btrfs_root_bytenr(&root->root_item);
  703. if (old_root_bytenr == root->node->start &&
  704. old_root_used == btrfs_root_used(&root->root_item))
  705. break;
  706. btrfs_set_root_node(&root->root_item, root->node);
  707. ret = btrfs_update_root(trans, tree_root,
  708. &root->root_key,
  709. &root->root_item);
  710. if (ret)
  711. return ret;
  712. old_root_used = btrfs_root_used(&root->root_item);
  713. ret = btrfs_write_dirty_block_groups(trans, root);
  714. if (ret)
  715. return ret;
  716. }
  717. if (root != root->fs_info->extent_root)
  718. switch_commit_root(root);
  719. return 0;
  720. }
  721. /*
  722. * update all the cowonly tree roots on disk
  723. *
  724. * The error handling in this function may not be obvious. Any of the
  725. * failures will cause the file system to go offline. We still need
  726. * to clean up the delayed refs.
  727. */
  728. static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
  729. struct btrfs_root *root)
  730. {
  731. struct btrfs_fs_info *fs_info = root->fs_info;
  732. struct list_head *next;
  733. struct extent_buffer *eb;
  734. int ret;
  735. ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
  736. if (ret)
  737. return ret;
  738. eb = btrfs_lock_root_node(fs_info->tree_root);
  739. ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
  740. 0, &eb);
  741. btrfs_tree_unlock(eb);
  742. free_extent_buffer(eb);
  743. if (ret)
  744. return ret;
  745. ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
  746. if (ret)
  747. return ret;
  748. ret = btrfs_run_dev_stats(trans, root->fs_info);
  749. WARN_ON(ret);
  750. ret = btrfs_run_dev_replace(trans, root->fs_info);
  751. WARN_ON(ret);
  752. ret = btrfs_run_qgroups(trans, root->fs_info);
  753. BUG_ON(ret);
  754. /* run_qgroups might have added some more refs */
  755. ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
  756. BUG_ON(ret);
  757. while (!list_empty(&fs_info->dirty_cowonly_roots)) {
  758. next = fs_info->dirty_cowonly_roots.next;
  759. list_del_init(next);
  760. root = list_entry(next, struct btrfs_root, dirty_list);
  761. ret = update_cowonly_root(trans, root);
  762. if (ret)
  763. return ret;
  764. }
  765. down_write(&fs_info->extent_commit_sem);
  766. switch_commit_root(fs_info->extent_root);
  767. up_write(&fs_info->extent_commit_sem);
  768. btrfs_after_dev_replace_commit(fs_info);
  769. return 0;
  770. }
  771. /*
  772. * dead roots are old snapshots that need to be deleted. This adds the
  773. * given root into the list of dead roots that need to
  774. * be deleted
  775. */
  776. int btrfs_add_dead_root(struct btrfs_root *root)
  777. {
  778. spin_lock(&root->fs_info->trans_lock);
  779. list_add(&root->root_list, &root->fs_info->dead_roots);
  780. spin_unlock(&root->fs_info->trans_lock);
  781. return 0;
  782. }
  783. /*
  784. * update all the fs tree (subvolume) roots on disk
  785. */
  786. static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
  787. struct btrfs_root *root)
  788. {
  789. struct btrfs_root *gang[8];
  790. struct btrfs_fs_info *fs_info = root->fs_info;
  791. int i;
  792. int ret;
  793. int err = 0;
  794. spin_lock(&fs_info->fs_roots_radix_lock);
  795. while (1) {
  796. ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
  797. (void **)gang, 0,
  798. ARRAY_SIZE(gang),
  799. BTRFS_ROOT_TRANS_TAG);
  800. if (ret == 0)
  801. break;
  802. for (i = 0; i < ret; i++) {
  803. root = gang[i];
  804. radix_tree_tag_clear(&fs_info->fs_roots_radix,
  805. (unsigned long)root->root_key.objectid,
  806. BTRFS_ROOT_TRANS_TAG);
  807. spin_unlock(&fs_info->fs_roots_radix_lock);
  808. btrfs_free_log(trans, root);
  809. btrfs_update_reloc_root(trans, root);
  810. btrfs_orphan_commit_root(trans, root);
  811. btrfs_save_ino_cache(root, trans);
  812. /* see comments in should_cow_block() */
  813. root->force_cow = 0;
  814. smp_wmb();
  815. if (root->commit_root != root->node) {
  816. mutex_lock(&root->fs_commit_mutex);
  817. switch_commit_root(root);
  818. btrfs_unpin_free_ino(root);
  819. mutex_unlock(&root->fs_commit_mutex);
  820. btrfs_set_root_node(&root->root_item,
  821. root->node);
  822. }
  823. err = btrfs_update_root(trans, fs_info->tree_root,
  824. &root->root_key,
  825. &root->root_item);
  826. spin_lock(&fs_info->fs_roots_radix_lock);
  827. if (err)
  828. break;
  829. }
  830. }
  831. spin_unlock(&fs_info->fs_roots_radix_lock);
  832. return err;
  833. }
  834. /*
  835. * defrag a given btree. If cacheonly == 1, this won't read from the disk,
  836. * otherwise every leaf in the btree is read and defragged.
  837. */
  838. int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
  839. {
  840. struct btrfs_fs_info *info = root->fs_info;
  841. struct btrfs_trans_handle *trans;
  842. int ret;
  843. if (xchg(&root->defrag_running, 1))
  844. return 0;
  845. while (1) {
  846. trans = btrfs_start_transaction(root, 0);
  847. if (IS_ERR(trans))
  848. return PTR_ERR(trans);
  849. ret = btrfs_defrag_leaves(trans, root, cacheonly);
  850. btrfs_end_transaction(trans, root);
  851. btrfs_btree_balance_dirty(info->tree_root);
  852. cond_resched();
  853. if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
  854. break;
  855. }
  856. root->defrag_running = 0;
  857. return ret;
  858. }
  859. /*
  860. * new snapshots need to be created at a very specific time in the
  861. * transaction commit. This does the actual creation
  862. */
  863. static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
  864. struct btrfs_fs_info *fs_info,
  865. struct btrfs_pending_snapshot *pending)
  866. {
  867. struct btrfs_key key;
  868. struct btrfs_root_item *new_root_item;
  869. struct btrfs_root *tree_root = fs_info->tree_root;
  870. struct btrfs_root *root = pending->root;
  871. struct btrfs_root *parent_root;
  872. struct btrfs_block_rsv *rsv;
  873. struct inode *parent_inode;
  874. struct btrfs_path *path;
  875. struct btrfs_dir_item *dir_item;
  876. struct dentry *parent;
  877. struct dentry *dentry;
  878. struct extent_buffer *tmp;
  879. struct extent_buffer *old;
  880. struct timespec cur_time = CURRENT_TIME;
  881. int ret;
  882. u64 to_reserve = 0;
  883. u64 index = 0;
  884. u64 objectid;
  885. u64 root_flags;
  886. uuid_le new_uuid;
  887. path = btrfs_alloc_path();
  888. if (!path) {
  889. ret = pending->error = -ENOMEM;
  890. goto path_alloc_fail;
  891. }
  892. new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
  893. if (!new_root_item) {
  894. ret = pending->error = -ENOMEM;
  895. goto root_item_alloc_fail;
  896. }
  897. ret = btrfs_find_free_objectid(tree_root, &objectid);
  898. if (ret) {
  899. pending->error = ret;
  900. goto no_free_objectid;
  901. }
  902. btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
  903. if (to_reserve > 0) {
  904. ret = btrfs_block_rsv_add(root, &pending->block_rsv,
  905. to_reserve,
  906. BTRFS_RESERVE_NO_FLUSH);
  907. if (ret) {
  908. pending->error = ret;
  909. goto no_free_objectid;
  910. }
  911. }
  912. ret = btrfs_qgroup_inherit(trans, fs_info, root->root_key.objectid,
  913. objectid, pending->inherit);
  914. if (ret) {
  915. pending->error = ret;
  916. goto no_free_objectid;
  917. }
  918. key.objectid = objectid;
  919. key.offset = (u64)-1;
  920. key.type = BTRFS_ROOT_ITEM_KEY;
  921. rsv = trans->block_rsv;
  922. trans->block_rsv = &pending->block_rsv;
  923. dentry = pending->dentry;
  924. parent = dget_parent(dentry);
  925. parent_inode = parent->d_inode;
  926. parent_root = BTRFS_I(parent_inode)->root;
  927. record_root_in_trans(trans, parent_root);
  928. /*
  929. * insert the directory item
  930. */
  931. ret = btrfs_set_inode_index(parent_inode, &index);
  932. BUG_ON(ret); /* -ENOMEM */
  933. /* check if there is a file/dir which has the same name. */
  934. dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
  935. btrfs_ino(parent_inode),
  936. dentry->d_name.name,
  937. dentry->d_name.len, 0);
  938. if (dir_item != NULL && !IS_ERR(dir_item)) {
  939. pending->error = -EEXIST;
  940. goto fail;
  941. } else if (IS_ERR(dir_item)) {
  942. ret = PTR_ERR(dir_item);
  943. btrfs_abort_transaction(trans, root, ret);
  944. goto fail;
  945. }
  946. btrfs_release_path(path);
  947. /*
  948. * pull in the delayed directory update
  949. * and the delayed inode item
  950. * otherwise we corrupt the FS during
  951. * snapshot
  952. */
  953. ret = btrfs_run_delayed_items(trans, root);
  954. if (ret) { /* Transaction aborted */
  955. btrfs_abort_transaction(trans, root, ret);
  956. goto fail;
  957. }
  958. record_root_in_trans(trans, root);
  959. btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
  960. memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
  961. btrfs_check_and_init_root_item(new_root_item);
  962. root_flags = btrfs_root_flags(new_root_item);
  963. if (pending->readonly)
  964. root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
  965. else
  966. root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
  967. btrfs_set_root_flags(new_root_item, root_flags);
  968. btrfs_set_root_generation_v2(new_root_item,
  969. trans->transid);
  970. uuid_le_gen(&new_uuid);
  971. memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
  972. memcpy(new_root_item->parent_uuid, root->root_item.uuid,
  973. BTRFS_UUID_SIZE);
  974. new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec);
  975. new_root_item->otime.nsec = cpu_to_le32(cur_time.tv_nsec);
  976. btrfs_set_root_otransid(new_root_item, trans->transid);
  977. memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
  978. memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
  979. btrfs_set_root_stransid(new_root_item, 0);
  980. btrfs_set_root_rtransid(new_root_item, 0);
  981. old = btrfs_lock_root_node(root);
  982. ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
  983. if (ret) {
  984. btrfs_tree_unlock(old);
  985. free_extent_buffer(old);
  986. btrfs_abort_transaction(trans, root, ret);
  987. goto fail;
  988. }
  989. btrfs_set_lock_blocking(old);
  990. ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
  991. /* clean up in any case */
  992. btrfs_tree_unlock(old);
  993. free_extent_buffer(old);
  994. if (ret) {
  995. btrfs_abort_transaction(trans, root, ret);
  996. goto fail;
  997. }
  998. /* see comments in should_cow_block() */
  999. root->force_cow = 1;
  1000. smp_wmb();
  1001. btrfs_set_root_node(new_root_item, tmp);
  1002. /* record when the snapshot was created in key.offset */
  1003. key.offset = trans->transid;
  1004. ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
  1005. btrfs_tree_unlock(tmp);
  1006. free_extent_buffer(tmp);
  1007. if (ret) {
  1008. btrfs_abort_transaction(trans, root, ret);
  1009. goto fail;
  1010. }
  1011. /*
  1012. * insert root back/forward references
  1013. */
  1014. ret = btrfs_add_root_ref(trans, tree_root, objectid,
  1015. parent_root->root_key.objectid,
  1016. btrfs_ino(parent_inode), index,
  1017. dentry->d_name.name, dentry->d_name.len);
  1018. if (ret) {
  1019. btrfs_abort_transaction(trans, root, ret);
  1020. goto fail;
  1021. }
  1022. key.offset = (u64)-1;
  1023. pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
  1024. if (IS_ERR(pending->snap)) {
  1025. ret = PTR_ERR(pending->snap);
  1026. btrfs_abort_transaction(trans, root, ret);
  1027. goto fail;
  1028. }
  1029. ret = btrfs_reloc_post_snapshot(trans, pending);
  1030. if (ret) {
  1031. btrfs_abort_transaction(trans, root, ret);
  1032. goto fail;
  1033. }
  1034. ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
  1035. if (ret) {
  1036. btrfs_abort_transaction(trans, root, ret);
  1037. goto fail;
  1038. }
  1039. ret = btrfs_insert_dir_item(trans, parent_root,
  1040. dentry->d_name.name, dentry->d_name.len,
  1041. parent_inode, &key,
  1042. BTRFS_FT_DIR, index);
  1043. /* We already checked the name at the beginning, so a collision here is impossible. */
  1044. BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
  1045. if (ret) {
  1046. btrfs_abort_transaction(trans, root, ret);
  1047. goto fail;
  1048. }
  1049. btrfs_i_size_write(parent_inode, parent_inode->i_size +
  1050. dentry->d_name.len * 2);
  1051. parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
  1052. ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
  1053. if (ret)
  1054. btrfs_abort_transaction(trans, root, ret);
  1055. fail:
  1056. dput(parent);
  1057. trans->block_rsv = rsv;
  1058. no_free_objectid:
  1059. kfree(new_root_item);
  1060. root_item_alloc_fail:
  1061. btrfs_free_path(path);
  1062. path_alloc_fail:
  1063. btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
  1064. return ret;
  1065. }
  1066. /*
  1067. * create all the snapshots we've scheduled for creation
  1068. */
  1069. static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
  1070. struct btrfs_fs_info *fs_info)
  1071. {
  1072. struct btrfs_pending_snapshot *pending;
  1073. struct list_head *head = &trans->transaction->pending_snapshots;
  1074. list_for_each_entry(pending, head, list)
  1075. create_pending_snapshot(trans, fs_info, pending);
  1076. return 0;
  1077. }
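/*
 * Copy the root pointers for the chunk tree and the tree of tree roots into
 * the in-memory copy of the super block that will be written at the end of
 * the commit.
 */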
  1078. static void update_super_roots(struct btrfs_root *root)
  1079. {
  1080. struct btrfs_root_item *root_item;
  1081. struct btrfs_super_block *super;
  1082. super = root->fs_info->super_copy;
  1083. root_item = &root->fs_info->chunk_root->root_item;
  1084. super->chunk_root = root_item->bytenr;
  1085. super->chunk_root_generation = root_item->generation;
  1086. super->chunk_root_level = root_item->level;
  1087. root_item = &root->fs_info->tree_root->root_item;
  1088. super->root = root_item->bytenr;
  1089. super->generation = root_item->generation;
  1090. super->root_level = root_item->level;
  1091. if (btrfs_test_opt(root, SPACE_CACHE))
  1092. super->cache_generation = root_item->generation;
  1093. }
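/*
 * The two helpers below report, under trans_lock, whether the running
 * transaction is in commit or blocked.
 */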
  1094. int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
  1095. {
  1096. int ret = 0;
  1097. spin_lock(&info->trans_lock);
  1098. if (info->running_transaction)
  1099. ret = info->running_transaction->in_commit;
  1100. spin_unlock(&info->trans_lock);
  1101. return ret;
  1102. }
  1103. int btrfs_transaction_blocked(struct btrfs_fs_info *info)
  1104. {
  1105. int ret = 0;
  1106. spin_lock(&info->trans_lock);
  1107. if (info->running_transaction)
  1108. ret = info->running_transaction->blocked;
  1109. spin_unlock(&info->trans_lock);
  1110. return ret;
  1111. }
  1112. /*
  1113. * wait for the current transaction commit to start and block subsequent
  1114. * transaction joins
  1115. */
  1116. static void wait_current_trans_commit_start(struct btrfs_root *root,
  1117. struct btrfs_transaction *trans)
  1118. {
  1119. wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
  1120. }
  1121. /*
  1122. * wait for the current transaction to start and then become unblocked.
  1123. * caller holds ref.
  1124. */
  1125. static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
  1126. struct btrfs_transaction *trans)
  1127. {
  1128. wait_event(root->fs_info->transaction_wait,
  1129. trans->commit_done || (trans->in_commit && !trans->blocked));
  1130. }
  1131. /*
  1132. * commit transactions asynchronously. once btrfs_commit_transaction_async
  1133. * returns, any subsequent transaction will not be allowed to join.
  1134. */
  1135. struct btrfs_async_commit {
  1136. struct btrfs_trans_handle *newtrans;
  1137. struct btrfs_root *root;
  1138. struct work_struct work;
  1139. };
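/*
 * Work function for async commits: re-acquire the freeze protection that was
 * handed over by the submitter (for lockdep) and commit the transaction.
 */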
  1140. static void do_async_commit(struct work_struct *work)
  1141. {
  1142. struct btrfs_async_commit *ac =
  1143. container_of(work, struct btrfs_async_commit, work);
  1144. /*
  1145. * We've got freeze protection passed with the transaction.
  1146. * Tell lockdep about it.
  1147. */
  1148. if (ac->newtrans->type < TRANS_JOIN_NOLOCK)
  1149. rwsem_acquire_read(
  1150. &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
  1151. 0, 1, _THIS_IP_);
  1152. current->journal_info = ac->newtrans;
  1153. btrfs_commit_transaction(ac->newtrans, ac->root);
  1154. kfree(ac);
  1155. }
  1156. int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
  1157. struct btrfs_root *root,
  1158. int wait_for_unblock)
  1159. {
  1160. struct btrfs_async_commit *ac;
  1161. struct btrfs_transaction *cur_trans;
  1162. ac = kmalloc(sizeof(*ac), GFP_NOFS);
  1163. if (!ac)
  1164. return -ENOMEM;
  1165. INIT_WORK(&ac->work, do_async_commit);
  1166. ac->root = root;
  1167. ac->newtrans = btrfs_join_transaction(root);
  1168. if (IS_ERR(ac->newtrans)) {
  1169. int err = PTR_ERR(ac->newtrans);
  1170. kfree(ac);
  1171. return err;
  1172. }
  1173. /* take transaction reference */
  1174. cur_trans = trans->transaction;
  1175. atomic_inc(&cur_trans->use_count);
  1176. btrfs_end_transaction(trans, root);
  1177. /*
  1178. * Tell lockdep we've released the freeze rwsem, since the
  1179. * async commit thread will be the one to unlock it.
  1180. */
  1181. if (trans->type < TRANS_JOIN_NOLOCK)
  1182. rwsem_release(
  1183. &root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
  1184. 1, _THIS_IP_);
  1185. schedule_work(&ac->work);
  1186. /* wait for transaction to start and unblock */
  1187. if (wait_for_unblock)
  1188. wait_current_trans_commit_start_and_unblock(root, cur_trans);
  1189. else
  1190. wait_current_trans_commit_start(root, cur_trans);
  1191. if (current->journal_info == trans)
  1192. current->journal_info = NULL;
  1193. put_transaction(cur_trans);
  1194. return 0;
  1195. }
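/*
 * Called when a commit fails: abort the transaction, unhook it from the
 * fs_info transaction list and drop the remaining references to it.
 */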
  1196. static void cleanup_transaction(struct btrfs_trans_handle *trans,
  1197. struct btrfs_root *root, int err)
  1198. {
  1199. struct btrfs_transaction *cur_trans = trans->transaction;
  1200. WARN_ON(trans->use_count > 1);
  1201. btrfs_abort_transaction(trans, root, err);
  1202. spin_lock(&root->fs_info->trans_lock);
  1203. list_del_init(&cur_trans->list);
  1204. if (cur_trans == root->fs_info->running_transaction) {
  1205. root->fs_info->running_transaction = NULL;
  1206. root->fs_info->trans_no_join = 0;
  1207. }
  1208. spin_unlock(&root->fs_info->trans_lock);
  1209. btrfs_cleanup_one_transaction(trans->transaction, root);
  1210. put_transaction(cur_trans);
  1211. put_transaction(cur_trans);
  1212. trace_btrfs_transaction_commit(root);
  1213. btrfs_scrub_continue(root);
  1214. if (current->journal_info == trans)
  1215. current->journal_info = NULL;
  1216. kmem_cache_free(btrfs_trans_handle_cachep, trans);
  1217. }
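/*
 * Flush work that must be finished before the commit proper: start and wait
 * for delalloc/ordered extents when needed, run delayed items, account
 * delayed-ref qgroups and run the pending ordered operations.
 */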
  1218. static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
  1219. struct btrfs_root *root)
  1220. {
  1221. int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);
  1222. int snap_pending = 0;
  1223. int ret;
  1224. if (!flush_on_commit) {
  1225. spin_lock(&root->fs_info->trans_lock);
  1226. if (!list_empty(&trans->transaction->pending_snapshots))
  1227. snap_pending = 1;
  1228. spin_unlock(&root->fs_info->trans_lock);
  1229. }
  1230. if (flush_on_commit || snap_pending) {
  1231. ret = btrfs_start_delalloc_inodes(root, 1);
  1232. if (ret)
  1233. return ret;
  1234. btrfs_wait_ordered_extents(root, 1);
  1235. }
  1236. ret = btrfs_run_delayed_items(trans, root);
  1237. if (ret)
  1238. return ret;
  1239. /*
  1240. * running the delayed items may have added new refs. account
  1241. * them now so that they hinder processing of more delayed refs
  1242. * as little as possible.
  1243. */
  1244. btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
  1245. /*
  1246. * rename doesn't use btrfs_join_transaction, so, once we
  1247. * set the transaction to blocked above, we aren't going
  1248. * to get any new ordered operations. We can safely run
  1249. * it here and know for sure that nothing new will be added
  1250. * to the list
  1251. */
  1252. btrfs_run_ordered_operations(root, 1);
  1253. return 0;
  1254. }
  1255. /*
  1256. * btrfs_transaction state sequence:
  1257. * in_commit = 0, blocked = 0 (initial)
  1258. * in_commit = 1, blocked = 1
  1259. * blocked = 0
  1260. * commit_done = 1
  1261. */
  1262. int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
  1263. struct btrfs_root *root)
  1264. {
  1265. unsigned long joined = 0;
  1266. struct btrfs_transaction *cur_trans = trans->transaction;
  1267. struct btrfs_transaction *prev_trans = NULL;
  1268. DEFINE_WAIT(wait);
  1269. int ret;
  1270. int should_grow = 0;
  1271. unsigned long now = get_seconds();
  1272. ret = btrfs_run_ordered_operations(root, 0);
  1273. if (ret) {
  1274. btrfs_abort_transaction(trans, root, ret);
  1275. goto cleanup_transaction;
  1276. }
  1277. /* Stop the commit early if ->aborted is set */
  1278. if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
  1279. ret = cur_trans->aborted;
  1280. goto cleanup_transaction;
  1281. }
  1282. /* make a pass through all the delayed refs we have so far
  1283. * any running procs may add more while we are here
  1284. */
  1285. ret = btrfs_run_delayed_refs(trans, root, 0);
  1286. if (ret)
  1287. goto cleanup_transaction;
  1288. btrfs_trans_release_metadata(trans, root);
  1289. trans->block_rsv = NULL;
  1290. cur_trans = trans->transaction;
  1291. /*
  1292. * set the flushing flag so procs in this transaction have to
  1293. * start sending their work down.
  1294. */
  1295. cur_trans->delayed_refs.flushing = 1;
  1296. if (!list_empty(&trans->new_bgs))
  1297. btrfs_create_pending_block_groups(trans, root);
  1298. ret = btrfs_run_delayed_refs(trans, root, 0);
  1299. if (ret)
  1300. goto cleanup_transaction;
  1301. spin_lock(&cur_trans->commit_lock);
  1302. if (cur_trans->in_commit) {
  1303. spin_unlock(&cur_trans->commit_lock);
  1304. atomic_inc(&cur_trans->use_count);
  1305. ret = btrfs_end_transaction(trans, root);
  1306. wait_for_commit(root, cur_trans);
  1307. put_transaction(cur_trans);
  1308. return ret;
  1309. }
  1310. trans->transaction->in_commit = 1;
  1311. trans->transaction->blocked = 1;
  1312. spin_unlock(&cur_trans->commit_lock);
  1313. wake_up(&root->fs_info->transaction_blocked_wait);
  1314. spin_lock(&root->fs_info->trans_lock);
  1315. if (cur_trans->list.prev != &root->fs_info->trans_list) {
  1316. prev_trans = list_entry(cur_trans->list.prev,
  1317. struct btrfs_transaction, list);
  1318. if (!prev_trans->commit_done) {
  1319. atomic_inc(&prev_trans->use_count);
  1320. spin_unlock(&root->fs_info->trans_lock);
  1321. wait_for_commit(root, prev_trans);
  1322. put_transaction(prev_trans);
  1323. } else {
  1324. spin_unlock(&root->fs_info->trans_lock);
  1325. }
  1326. } else {
  1327. spin_unlock(&root->fs_info->trans_lock);
  1328. }
  1329. if (!btrfs_test_opt(root, SSD) &&
  1330. (now < cur_trans->start_time || now - cur_trans->start_time < 1))
  1331. should_grow = 1;
  1332. do {
  1333. joined = cur_trans->num_joined;
  1334. WARN_ON(cur_trans != trans->transaction);
  1335. ret = btrfs_flush_all_pending_stuffs(trans, root);
  1336. if (ret)
  1337. goto cleanup_transaction;
  1338. prepare_to_wait(&cur_trans->writer_wait, &wait,
  1339. TASK_UNINTERRUPTIBLE);
  1340. if (atomic_read(&cur_trans->num_writers) > 1)
  1341. schedule_timeout(MAX_SCHEDULE_TIMEOUT);
  1342. else if (should_grow)
  1343. schedule_timeout(1);
  1344. finish_wait(&cur_trans->writer_wait, &wait);
  1345. } while (atomic_read(&cur_trans->num_writers) > 1 ||
  1346. (should_grow && cur_trans->num_joined != joined));
  1347. ret = btrfs_flush_all_pending_stuffs(trans, root);
  1348. if (ret)
  1349. goto cleanup_transaction;
  1350. /*
  1351. * Ok now we need to make sure to block out any other joins while we
  1352. * commit the transaction. We could have started a join before setting
  1353. * no_join so make sure to wait for num_writers to == 1 again.
  1354. */
  1355. spin_lock(&root->fs_info->trans_lock);
  1356. root->fs_info->trans_no_join = 1;
  1357. spin_unlock(&root->fs_info->trans_lock);
  1358. wait_event(cur_trans->writer_wait,
  1359. atomic_read(&cur_trans->num_writers) == 1);
  1360. /* ->aborted might be set after the previous check, so check it */
  1361. if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
  1362. ret = cur_trans->aborted;
  1363. goto cleanup_transaction;
  1364. }
  1365. /*
  1366. * the reloc mutex makes sure that we stop
  1367. * the balancing code from coming in and moving
  1368. * extents around in the middle of the commit
  1369. */
  1370. mutex_lock(&root->fs_info->reloc_mutex);
  1371. /*
  1372. * We needn't worry about the delayed items because we will
  1373. * deal with them in create_pending_snapshot(), which is the
  1374. * core function of the snapshot creation.
  1375. */
  1376. ret = create_pending_snapshots(trans, root->fs_info);
  1377. if (ret) {
  1378. mutex_unlock(&root->fs_info->reloc_mutex);
  1379. goto cleanup_transaction;
  1380. }
  1381. /*
  1382. * We insert the dir indexes of the snapshots and update the inode
  1383. * of the snapshots' parents after the snapshot creation, so there
  1384. * are some delayed items which are not dealt with. Now deal with
  1385. * them.
  1386. *
  1387. * We needn't worry that this operation will corrupt the snapshots,
  1388. * because all the trees which are snapshotted will be forced to COW
  1389. * the nodes and leaves.
  1390. */
  1391. ret = btrfs_run_delayed_items(trans, root);
  1392. if (ret) {
  1393. mutex_unlock(&root->fs_info->reloc_mutex);
  1394. goto cleanup_transaction;
  1395. }
  1396. ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
  1397. if (ret) {
  1398. mutex_unlock(&root->fs_info->reloc_mutex);
  1399. goto cleanup_transaction;
  1400. }
  1401. /*
  1402. * make sure none of the code above managed to slip in a
  1403. * delayed item
  1404. */
  1405. btrfs_assert_delayed_root_empty(root);
  1406. WARN_ON(cur_trans != trans->transaction);
  1407. btrfs_scrub_pause(root);
  1408. /* commit_fs_roots and commit_cowonly_roots are responsible for getting the
  1409. * various roots consistent with each other. Every pointer
  1410. * in the tree of tree roots has to point to the most up to date
  1411. * root for every subvolume and other tree. So, we have to keep
  1412. * the tree logging code from jumping in and changing any
  1413. * of the trees.
  1414. *
  1415. * At this point in the commit, there can't be any tree-log
  1416. * writers, but a little lower down we drop the trans mutex
  1417. * and let new people in. By holding the tree_log_mutex
  1418. * from now until after the super is written, we avoid races
  1419. * with the tree-log code.
  1420. */
  1421. mutex_lock(&root->fs_info->tree_log_mutex);
  1422. ret = commit_fs_roots(trans, root);
  1423. if (ret) {
  1424. mutex_unlock(&root->fs_info->tree_log_mutex);
  1425. mutex_unlock(&root->fs_info->reloc_mutex);
  1426. goto cleanup_transaction;
  1427. }
  1428. /* commit_fs_roots gets rid of all the tree log roots, it is now
  1429. * safe to free the root of tree log roots
  1430. */
  1431. btrfs_free_log_root_tree(trans, root->fs_info);
  1432. ret = commit_cowonly_roots(trans, root);
  1433. if (ret) {
  1434. mutex_unlock(&root->fs_info->tree_log_mutex);
  1435. mutex_unlock(&root->fs_info->reloc_mutex);
  1436. goto cleanup_transaction;
  1437. }
  1438. /*
  1439. * The tasks which save the space cache and inode cache may also
  1440. * update ->aborted, check it.
  1441. */
  1442. if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
  1443. ret = cur_trans->aborted;
  1444. mutex_unlock(&root->fs_info->tree_log_mutex);
  1445. mutex_unlock(&root->fs_info->reloc_mutex);
  1446. goto cleanup_transaction;
  1447. }
  1448. btrfs_prepare_extent_commit(trans, root);
  1449. cur_trans = root->fs_info->running_transaction;
  1450. btrfs_set_root_node(&root->fs_info->tree_root->root_item,
  1451. root->fs_info->tree_root->node);
  1452. switch_commit_root(root->fs_info->tree_root);
  1453. btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
  1454. root->fs_info->chunk_root->node);
  1455. switch_commit_root(root->fs_info->chunk_root);
  1456. assert_qgroups_uptodate(trans);
  1457. update_super_roots(root);
  1458. if (!root->fs_info->log_root_recovering) {
  1459. btrfs_set_super_log_root(root->fs_info->super_copy, 0);
  1460. btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
  1461. }
  1462. memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
  1463. sizeof(*root->fs_info->super_copy));
  1464. trans->transaction->blocked = 0;
  1465. spin_lock(&root->fs_info->trans_lock);
  1466. root->fs_info->running_transaction = NULL;
  1467. root->fs_info->trans_no_join = 0;
  1468. spin_unlock(&root->fs_info->trans_lock);
  1469. mutex_unlock(&root->fs_info->reloc_mutex);
  1470. wake_up(&root->fs_info->transaction_wait);
  1471. ret = btrfs_write_and_wait_transaction(trans, root);
  1472. if (ret) {
  1473. btrfs_error(root->fs_info, ret,
  1474. "Error while writing out transaction.");
  1475. mutex_unlock(&root->fs_info->tree_log_mutex);
  1476. goto cleanup_transaction;
  1477. }
  1478. ret = write_ctree_super(trans, root, 0);
  1479. if (ret) {
  1480. mutex_unlock(&root->fs_info->tree_log_mutex);
  1481. goto cleanup_transaction;
  1482. }
  1483. /*
  1484. * the super is written, we can safely allow the tree-loggers
  1485. * to go about their business
  1486. */
  1487. mutex_unlock(&root->fs_info->tree_log_mutex);
  1488. btrfs_finish_extent_commit(trans, root);
  1489. cur_trans->commit_done = 1;
  1490. root->fs_info->last_trans_committed = cur_trans->transid;
  1491. wake_up(&cur_trans->commit_wait);
  1492. spin_lock(&root->fs_info->trans_lock);
  1493. list_del_init(&cur_trans->list);
  1494. spin_unlock(&root->fs_info->trans_lock);
  1495. put_transaction(cur_trans);
  1496. put_transaction(cur_trans);
  1497. if (trans->type < TRANS_JOIN_NOLOCK)
  1498. sb_end_intwrite(root->fs_info->sb);
  1499. trace_btrfs_transaction_commit(root);
  1500. btrfs_scrub_continue(root);
  1501. if (current->journal_info == trans)
  1502. current->journal_info = NULL;
  1503. kmem_cache_free(btrfs_trans_handle_cachep, trans);
  1504. if (current != root->fs_info->transaction_kthread)
  1505. btrfs_run_delayed_iputs(root);
  1506. return ret;
  1507. cleanup_transaction:
  1508. btrfs_trans_release_metadata(trans, root);
  1509. trans->block_rsv = NULL;
  1510. btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
  1511. // WARN_ON(1);
  1512. if (current->journal_info == trans)
  1513. current->journal_info = NULL;
  1514. cleanup_transaction(trans, root, ret);
  1515. return ret;
  1516. }
  1517. /*
  1518. * interface function to delete all the snapshots we have scheduled for deletion
  1519. */
  1520. int btrfs_clean_old_snapshots(struct btrfs_root *root)
  1521. {
  1522. LIST_HEAD(list);
  1523. struct btrfs_fs_info *fs_info = root->fs_info;
  1524. spin_lock(&fs_info->trans_lock);
  1525. list_splice_init(&fs_info->dead_roots, &list);
  1526. spin_unlock(&fs_info->trans_lock);
  1527. while (!list_empty(&list)) {
  1528. int ret;
  1529. root = list_entry(list.next, struct btrfs_root, root_list);
  1530. list_del(&root->root_list);
  1531. btrfs_kill_all_delayed_nodes(root);
  1532. if (btrfs_header_backref_rev(root->node) <
  1533. BTRFS_MIXED_BACKREF_REV)
  1534. ret = btrfs_drop_snapshot(root, NULL, 0, 0);
  1535. else
  1536. ret = btrfs_drop_snapshot(root, NULL, 1, 0);
  1537. BUG_ON(ret < 0);
  1538. }
  1539. return 0;
  1540. }