transaction.c

  1. /*
  2. * Copyright (C) 2007 Oracle. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public
  6. * License v2 as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11. * General Public License for more details.
  12. *
  13. * You should have received a copy of the GNU General Public
  14. * License along with this program; if not, write to the
  15. * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  16. * Boston, MA 021110-1307, USA.
  17. */
  18. #include <linux/fs.h>
  19. #include <linux/slab.h>
  20. #include <linux/sched.h>
  21. #include <linux/writeback.h>
  22. #include <linux/pagemap.h>
  23. #include <linux/blkdev.h>
  24. #include <linux/uuid.h>
  25. #include "ctree.h"
  26. #include "disk-io.h"
  27. #include "transaction.h"
  28. #include "locking.h"
  29. #include "tree-log.h"
  30. #include "inode-map.h"
  31. #include "volumes.h"
  32. #include "dev-replace.h"
  33. #define BTRFS_ROOT_TRANS_TAG 0
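/*
 * drop one reference on a btrfs_transaction. When the last reference goes
 * away the transaction must already be off fs_info->trans_list and have an
 * empty delayed ref tree; the struct is then freed.
 */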
  34. void put_transaction(struct btrfs_transaction *transaction)
  35. {
  36. WARN_ON(atomic_read(&transaction->use_count) == 0);
  37. if (atomic_dec_and_test(&transaction->use_count)) {
  38. BUG_ON(!list_empty(&transaction->list));
  39. WARN_ON(transaction->delayed_refs.root.rb_node);
  40. memset(transaction, 0, sizeof(*transaction));
  41. kmem_cache_free(btrfs_transaction_cachep, transaction);
  42. }
  43. }
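/*
 * release the old commit root and make the current root node the new
 * commit root for this tree
 */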
  44. static noinline void switch_commit_root(struct btrfs_root *root)
  45. {
  46. free_extent_buffer(root->commit_root);
  47. root->commit_root = btrfs_root_node(root);
  48. }
  49. /*
  50. * either allocate a new transaction or hop into the existing one
  51. */
  52. static noinline int join_transaction(struct btrfs_root *root, int type)
  53. {
  54. struct btrfs_transaction *cur_trans;
  55. struct btrfs_fs_info *fs_info = root->fs_info;
  56. spin_lock(&fs_info->trans_lock);
  57. loop:
  58. /* The file system has been taken offline. No new transactions. */
  59. if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
  60. spin_unlock(&fs_info->trans_lock);
  61. return -EROFS;
  62. }
  63. if (fs_info->trans_no_join) {
  64. /*
  65. * If we are JOIN_NOLOCK we're already committing a current
  66. * transaction, we just need a handle to deal with something
  67. * when committing the transaction, such as inode cache and
  68. * space cache. It is a special case.
  69. */
  70. if (type != TRANS_JOIN_NOLOCK) {
  71. spin_unlock(&fs_info->trans_lock);
  72. return -EBUSY;
  73. }
  74. }
  75. cur_trans = fs_info->running_transaction;
  76. if (cur_trans) {
  77. if (cur_trans->aborted) {
  78. spin_unlock(&fs_info->trans_lock);
  79. return cur_trans->aborted;
  80. }
  81. atomic_inc(&cur_trans->use_count);
  82. atomic_inc(&cur_trans->num_writers);
  83. cur_trans->num_joined++;
  84. spin_unlock(&fs_info->trans_lock);
  85. return 0;
  86. }
  87. spin_unlock(&fs_info->trans_lock);
  88. /*
  89. * If we are ATTACH, we just want to catch the current transaction,
  90. * and commit it. If there is no transaction, just return ENOENT.
  91. */
  92. if (type == TRANS_ATTACH)
  93. return -ENOENT;
  94. cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
  95. if (!cur_trans)
  96. return -ENOMEM;
  97. spin_lock(&fs_info->trans_lock);
  98. if (fs_info->running_transaction) {
  99. /*
  100. * someone started a transaction after we unlocked. Make sure
  101. * to redo the trans_no_join checks above
  102. */
  103. kmem_cache_free(btrfs_transaction_cachep, cur_trans);
  104. goto loop;
  105. } else if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
  106. spin_unlock(&fs_info->trans_lock);
  107. kmem_cache_free(btrfs_transaction_cachep, cur_trans);
  108. return -EROFS;
  109. }
  110. atomic_set(&cur_trans->num_writers, 1);
  111. cur_trans->num_joined = 0;
  112. init_waitqueue_head(&cur_trans->writer_wait);
  113. init_waitqueue_head(&cur_trans->commit_wait);
  114. cur_trans->in_commit = 0;
  115. cur_trans->blocked = 0;
  116. /*
  117. * One for this trans handle, one so it will live on until we
  118. * commit the transaction.
  119. */
  120. atomic_set(&cur_trans->use_count, 2);
  121. cur_trans->commit_done = 0;
  122. cur_trans->start_time = get_seconds();
  123. cur_trans->delayed_refs.root = RB_ROOT;
  124. cur_trans->delayed_refs.num_entries = 0;
  125. cur_trans->delayed_refs.num_heads_ready = 0;
  126. cur_trans->delayed_refs.num_heads = 0;
  127. cur_trans->delayed_refs.flushing = 0;
  128. cur_trans->delayed_refs.run_delayed_start = 0;
  129. /*
  130. * although the tree mod log is per file system and not per transaction,
  131. * the log must never go across transaction boundaries.
  132. */
  133. smp_mb();
  134. if (!list_empty(&fs_info->tree_mod_seq_list))
  135. WARN(1, KERN_ERR "btrfs: tree_mod_seq_list not empty when "
  136. "creating a fresh transaction\n");
  137. if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
  138. WARN(1, KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
  139. "creating a fresh transaction\n");
  140. atomic_set(&fs_info->tree_mod_seq, 0);
  141. spin_lock_init(&cur_trans->commit_lock);
  142. spin_lock_init(&cur_trans->delayed_refs.lock);
  143. INIT_LIST_HEAD(&cur_trans->pending_snapshots);
  144. list_add_tail(&cur_trans->list, &fs_info->trans_list);
  145. extent_io_tree_init(&cur_trans->dirty_pages,
  146. fs_info->btree_inode->i_mapping);
  147. fs_info->generation++;
  148. cur_trans->transid = fs_info->generation;
  149. fs_info->running_transaction = cur_trans;
  150. cur_trans->aborted = 0;
  151. spin_unlock(&fs_info->trans_lock);
  152. return 0;
  153. }
  154. /*
  155. * this does all the record keeping required to make sure that a reference
  156. * counted root is properly recorded in a given transaction. This is required
  157. * to make sure the old root from before we joined the transaction is deleted
  158. * when the transaction commits
  159. */
  160. static int record_root_in_trans(struct btrfs_trans_handle *trans,
  161. struct btrfs_root *root)
  162. {
  163. if (root->ref_cows && root->last_trans < trans->transid) {
  164. WARN_ON(root == root->fs_info->extent_root);
  165. WARN_ON(root->commit_root != root->node);
  166. /*
  167. * see below for in_trans_setup usage rules
  168. * we have the reloc mutex held now, so there
  169. * is only one writer in this function
  170. */
  171. root->in_trans_setup = 1;
  172. /* make sure readers find in_trans_setup before
  173. * they find our root->last_trans update
  174. */
  175. smp_wmb();
  176. spin_lock(&root->fs_info->fs_roots_radix_lock);
  177. if (root->last_trans == trans->transid) {
  178. spin_unlock(&root->fs_info->fs_roots_radix_lock);
  179. return 0;
  180. }
  181. radix_tree_tag_set(&root->fs_info->fs_roots_radix,
  182. (unsigned long)root->root_key.objectid,
  183. BTRFS_ROOT_TRANS_TAG);
  184. spin_unlock(&root->fs_info->fs_roots_radix_lock);
  185. root->last_trans = trans->transid;
  186. /* this is pretty tricky. We don't want to
  187. * take the relocation lock in btrfs_record_root_in_trans
  188. * unless we're really doing the first setup for this root in
  189. * this transaction.
  190. *
  191. * Normally we'd use root->last_trans as a flag to decide
  192. * if we want to take the expensive mutex.
  193. *
  194. * But, we have to set root->last_trans before we
  195. * init the relocation root, otherwise, we trip over warnings
  196. * in ctree.c. The solution used here is to flag ourselves
  197. * with root->in_trans_setup. When this is 1, we're still
  198. * fixing up the reloc trees and everyone must wait.
  199. *
  200. * When this is zero, they can trust root->last_trans and fly
  201. * through btrfs_record_root_in_trans without having to take the
  202. * lock. smp_wmb() makes sure that all the writes above are
  203. * done before we pop in the zero below
  204. */
  205. btrfs_init_reloc_root(trans, root);
  206. smp_wmb();
  207. root->in_trans_setup = 0;
  208. }
  209. return 0;
  210. }
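/*
 * lockless fast path around record_root_in_trans: if the root is already
 * recorded in this transaction and the reloc setup has finished we can
 * return right away, otherwise take the reloc mutex and do the full setup
 */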
  211. int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
  212. struct btrfs_root *root)
  213. {
  214. if (!root->ref_cows)
  215. return 0;
  216. /*
  217. * see record_root_in_trans for comments about in_trans_setup usage
  218. * and barriers
  219. */
  220. smp_rmb();
  221. if (root->last_trans == trans->transid &&
  222. !root->in_trans_setup)
  223. return 0;
  224. mutex_lock(&root->fs_info->reloc_mutex);
  225. record_root_in_trans(trans, root);
  226. mutex_unlock(&root->fs_info->reloc_mutex);
  227. return 0;
  228. }
  229. /* wait for commit against the current transaction to become unblocked
  230. * when this is done, it is safe to start a new transaction, but the current
  231. * transaction might not be fully on disk.
  232. */
  233. static void wait_current_trans(struct btrfs_root *root)
  234. {
  235. struct btrfs_transaction *cur_trans;
  236. spin_lock(&root->fs_info->trans_lock);
  237. cur_trans = root->fs_info->running_transaction;
  238. if (cur_trans && cur_trans->blocked) {
  239. atomic_inc(&cur_trans->use_count);
  240. spin_unlock(&root->fs_info->trans_lock);
  241. wait_event(root->fs_info->transaction_wait,
  242. !cur_trans->blocked);
  243. put_transaction(cur_trans);
  244. } else {
  245. spin_unlock(&root->fs_info->trans_lock);
  246. }
  247. }
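/*
 * decide whether this start/join should wait for a blocked (committing)
 * transaction before proceeding
 */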
  248. static int may_wait_transaction(struct btrfs_root *root, int type)
  249. {
  250. if (root->fs_info->log_root_recovering)
  251. return 0;
  252. if (type == TRANS_USERSPACE)
  253. return 1;
  254. if (type == TRANS_START &&
  255. !atomic_read(&root->fs_info->open_ioctl_trans))
  256. return 1;
  257. return 0;
  258. }
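/*
 * common helper behind the btrfs_*_transaction() variants below: reserve
 * metadata space for num_items items when asked to, join or create the
 * running transaction and hand back a filled in handle, or an ERR_PTR on
 * failure
 */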
  259. static struct btrfs_trans_handle *
  260. start_transaction(struct btrfs_root *root, u64 num_items, int type,
  261. enum btrfs_reserve_flush_enum flush)
  262. {
  263. struct btrfs_trans_handle *h;
  264. struct btrfs_transaction *cur_trans;
  265. u64 num_bytes = 0;
  266. int ret;
  267. u64 qgroup_reserved = 0;
  268. if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
  269. return ERR_PTR(-EROFS);
  270. if (current->journal_info) {
  271. WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
  272. h = current->journal_info;
  273. h->use_count++;
  274. WARN_ON(h->use_count > 2);
  275. h->orig_rsv = h->block_rsv;
  276. h->block_rsv = NULL;
  277. goto got_it;
  278. }
  279. /*
  280. * Do the reservation before we join the transaction so we can do all
  281. * the appropriate flushing if need be.
  282. */
  283. if (num_items > 0 && root != root->fs_info->chunk_root) {
  284. if (root->fs_info->quota_enabled &&
  285. is_fstree(root->root_key.objectid)) {
  286. qgroup_reserved = num_items * root->leafsize;
  287. ret = btrfs_qgroup_reserve(root, qgroup_reserved);
  288. if (ret)
  289. return ERR_PTR(ret);
  290. }
  291. num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
  292. ret = btrfs_block_rsv_add(root,
  293. &root->fs_info->trans_block_rsv,
  294. num_bytes, flush);
  295. if (ret)
  296. goto reserve_fail;
  297. }
  298. again:
  299. h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
  300. if (!h) {
  301. ret = -ENOMEM;
  302. goto alloc_fail;
  303. }
  304. /*
  305. * If we are JOIN_NOLOCK we're already committing a transaction and
  306. * waiting on this guy, so we don't need to do the sb_start_intwrite
  307. * because we're already holding a ref. We need this because we could
308. * have raced in and done an fsync() on a file which can kick a commit
  309. * and then we deadlock with somebody doing a freeze.
  310. *
  311. * If we are ATTACH, it means we just want to catch the current
  312. * transaction and commit it, so we needn't do sb_start_intwrite().
  313. */
  314. if (type < TRANS_JOIN_NOLOCK)
  315. sb_start_intwrite(root->fs_info->sb);
  316. if (may_wait_transaction(root, type))
  317. wait_current_trans(root);
  318. do {
  319. ret = join_transaction(root, type);
  320. if (ret == -EBUSY)
  321. wait_current_trans(root);
  322. } while (ret == -EBUSY);
  323. if (ret < 0) {
  324. /* We must get the transaction if we are JOIN_NOLOCK. */
  325. BUG_ON(type == TRANS_JOIN_NOLOCK);
  326. goto join_fail;
  327. }
  328. cur_trans = root->fs_info->running_transaction;
  329. h->transid = cur_trans->transid;
  330. h->transaction = cur_trans;
  331. h->blocks_used = 0;
  332. h->bytes_reserved = 0;
  333. h->root = root;
  334. h->delayed_ref_updates = 0;
  335. h->use_count = 1;
  336. h->adding_csums = 0;
  337. h->block_rsv = NULL;
  338. h->orig_rsv = NULL;
  339. h->aborted = 0;
  340. h->qgroup_reserved = qgroup_reserved;
  341. h->delayed_ref_elem.seq = 0;
  342. h->type = type;
  343. INIT_LIST_HEAD(&h->qgroup_ref_list);
  344. INIT_LIST_HEAD(&h->new_bgs);
  345. smp_mb();
  346. if (cur_trans->blocked && may_wait_transaction(root, type)) {
  347. btrfs_commit_transaction(h, root);
  348. goto again;
  349. }
  350. if (num_bytes) {
  351. trace_btrfs_space_reservation(root->fs_info, "transaction",
  352. h->transid, num_bytes, 1);
  353. h->block_rsv = &root->fs_info->trans_block_rsv;
  354. h->bytes_reserved = num_bytes;
  355. }
  356. got_it:
  357. btrfs_record_root_in_trans(h, root);
  358. if (!current->journal_info && type != TRANS_USERSPACE)
  359. current->journal_info = h;
  360. return h;
  361. join_fail:
  362. if (type < TRANS_JOIN_NOLOCK)
  363. sb_end_intwrite(root->fs_info->sb);
  364. kmem_cache_free(btrfs_trans_handle_cachep, h);
  365. alloc_fail:
  366. if (num_bytes)
  367. btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
  368. num_bytes);
  369. reserve_fail:
  370. if (qgroup_reserved)
  371. btrfs_qgroup_free(root, qgroup_reserved);
  372. return ERR_PTR(ret);
  373. }
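/*
 * a typical caller of the helpers below looks roughly like this:
 *
 *	trans = btrfs_start_transaction(root, num_items);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	... modify the trees ...
 *	btrfs_end_transaction(trans, root);
 */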
  374. struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
  375. int num_items)
  376. {
  377. return start_transaction(root, num_items, TRANS_START,
  378. BTRFS_RESERVE_FLUSH_ALL);
  379. }
  380. struct btrfs_trans_handle *btrfs_start_transaction_lflush(
  381. struct btrfs_root *root, int num_items)
  382. {
  383. return start_transaction(root, num_items, TRANS_START,
  384. BTRFS_RESERVE_FLUSH_LIMIT);
  385. }
  386. struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
  387. {
  388. return start_transaction(root, 0, TRANS_JOIN, 0);
  389. }
  390. struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
  391. {
  392. return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
  393. }
  394. struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
  395. {
  396. return start_transaction(root, 0, TRANS_USERSPACE, 0);
  397. }
  398. struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
  399. {
  400. return start_transaction(root, 0, TRANS_ATTACH, 0);
  401. }
  402. /* wait for a transaction commit to be fully complete */
  403. static noinline void wait_for_commit(struct btrfs_root *root,
  404. struct btrfs_transaction *commit)
  405. {
  406. wait_event(commit->commit_wait, commit->commit_done);
  407. }
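/*
 * wait for the commit of a given transid to finish. transid == 0 means
 * wait for the newest transaction that is already committing, if there is
 * one.
 */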
  408. int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
  409. {
  410. struct btrfs_transaction *cur_trans = NULL, *t;
  411. int ret = 0;
  412. if (transid) {
  413. if (transid <= root->fs_info->last_trans_committed)
  414. goto out;
  415. ret = -EINVAL;
  416. /* find specified transaction */
  417. spin_lock(&root->fs_info->trans_lock);
  418. list_for_each_entry(t, &root->fs_info->trans_list, list) {
  419. if (t->transid == transid) {
  420. cur_trans = t;
  421. atomic_inc(&cur_trans->use_count);
  422. ret = 0;
  423. break;
  424. }
  425. if (t->transid > transid) {
  426. ret = 0;
  427. break;
  428. }
  429. }
  430. spin_unlock(&root->fs_info->trans_lock);
  431. /* The specified transaction doesn't exist */
  432. if (!cur_trans)
  433. goto out;
  434. } else {
  435. /* find newest transaction that is committing | committed */
  436. spin_lock(&root->fs_info->trans_lock);
  437. list_for_each_entry_reverse(t, &root->fs_info->trans_list,
  438. list) {
  439. if (t->in_commit) {
  440. if (t->commit_done)
  441. break;
  442. cur_trans = t;
  443. atomic_inc(&cur_trans->use_count);
  444. break;
  445. }
  446. }
  447. spin_unlock(&root->fs_info->trans_lock);
  448. if (!cur_trans)
  449. goto out; /* nothing committing|committed */
  450. }
  451. wait_for_commit(root, cur_trans);
  452. put_transaction(cur_trans);
  453. out:
  454. return ret;
  455. }
  456. void btrfs_throttle(struct btrfs_root *root)
  457. {
  458. if (!atomic_read(&root->fs_info->open_ioctl_trans))
  459. wait_current_trans(root);
  460. }
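/*
 * returns 1 when the global block reserve looks too low, which tells the
 * callers to wrap up the current transaction
 */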
  461. static int should_end_transaction(struct btrfs_trans_handle *trans,
  462. struct btrfs_root *root)
  463. {
  464. int ret;
  465. ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
  466. return ret ? 1 : 0;
  467. }
  468. int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
  469. struct btrfs_root *root)
  470. {
  471. struct btrfs_transaction *cur_trans = trans->transaction;
  472. int updates;
  473. int err;
  474. smp_mb();
  475. if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
  476. return 1;
  477. updates = trans->delayed_ref_updates;
  478. trans->delayed_ref_updates = 0;
  479. if (updates) {
  480. err = btrfs_run_delayed_refs(trans, root, updates);
  481. if (err) /* Error code will also eval true */
  482. return err;
  483. }
  484. return should_end_transaction(trans, root);
  485. }
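/*
 * common end-transaction path: drop this handle's use count, run a batch
 * of delayed refs, release the metadata reservation, and either wake the
 * transaction kthread or commit directly when the transaction should end
 */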
  486. static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
  487. struct btrfs_root *root, int throttle)
  488. {
  489. struct btrfs_transaction *cur_trans = trans->transaction;
  490. struct btrfs_fs_info *info = root->fs_info;
  491. int count = 0;
  492. int lock = (trans->type != TRANS_JOIN_NOLOCK);
  493. int err = 0;
  494. if (--trans->use_count) {
  495. trans->block_rsv = trans->orig_rsv;
  496. return 0;
  497. }
  498. /*
  499. * do the qgroup accounting as early as possible
  500. */
  501. err = btrfs_delayed_refs_qgroup_accounting(trans, info);
  502. btrfs_trans_release_metadata(trans, root);
  503. trans->block_rsv = NULL;
  504. /*
  505. * the same root has to be passed to start_transaction and
  506. * end_transaction. Subvolume quota depends on this.
  507. */
  508. WARN_ON(trans->root != root);
  509. if (trans->qgroup_reserved) {
  510. btrfs_qgroup_free(root, trans->qgroup_reserved);
  511. trans->qgroup_reserved = 0;
  512. }
  513. if (!list_empty(&trans->new_bgs))
  514. btrfs_create_pending_block_groups(trans, root);
  515. while (count < 2) {
  516. unsigned long cur = trans->delayed_ref_updates;
  517. trans->delayed_ref_updates = 0;
  518. if (cur &&
  519. trans->transaction->delayed_refs.num_heads_ready > 64) {
  520. trans->delayed_ref_updates = 0;
  521. btrfs_run_delayed_refs(trans, root, cur);
  522. } else {
  523. break;
  524. }
  525. count++;
  526. }
  527. btrfs_trans_release_metadata(trans, root);
  528. trans->block_rsv = NULL;
  529. if (!list_empty(&trans->new_bgs))
  530. btrfs_create_pending_block_groups(trans, root);
  531. if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
  532. should_end_transaction(trans, root)) {
  533. trans->transaction->blocked = 1;
  534. smp_wmb();
  535. }
  536. if (lock && cur_trans->blocked && !cur_trans->in_commit) {
  537. if (throttle) {
  538. /*
  539. * We may race with somebody else here so end up having
  540. * to call end_transaction on ourselves again, so inc
  541. * our use_count.
  542. */
  543. trans->use_count++;
  544. return btrfs_commit_transaction(trans, root);
  545. } else {
  546. wake_up_process(info->transaction_kthread);
  547. }
  548. }
  549. if (trans->type < TRANS_JOIN_NOLOCK)
  550. sb_end_intwrite(root->fs_info->sb);
  551. WARN_ON(cur_trans != info->running_transaction);
  552. WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
  553. atomic_dec(&cur_trans->num_writers);
  554. smp_mb();
  555. if (waitqueue_active(&cur_trans->writer_wait))
  556. wake_up(&cur_trans->writer_wait);
  557. put_transaction(cur_trans);
  558. if (current->journal_info == trans)
  559. current->journal_info = NULL;
  560. if (throttle)
  561. btrfs_run_delayed_iputs(root);
  562. if (trans->aborted ||
  563. root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
  564. err = -EIO;
  565. }
  566. assert_qgroups_uptodate(trans);
  567. memset(trans, 0, sizeof(*trans));
  568. kmem_cache_free(btrfs_trans_handle_cachep, trans);
  569. return err;
  570. }
  571. int btrfs_end_transaction(struct btrfs_trans_handle *trans,
  572. struct btrfs_root *root)
  573. {
  574. int ret;
  575. ret = __btrfs_end_transaction(trans, root, 0);
  576. if (ret)
  577. return ret;
  578. return 0;
  579. }
  580. int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
  581. struct btrfs_root *root)
  582. {
  583. int ret;
  584. ret = __btrfs_end_transaction(trans, root, 1);
  585. if (ret)
  586. return ret;
  587. return 0;
  588. }
  589. int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
  590. struct btrfs_root *root)
  591. {
  592. return __btrfs_end_transaction(trans, root, 1);
  593. }
  594. /*
  595. * when btree blocks are allocated, they have some corresponding bits set for
  596. * them in one of two extent_io trees. This is used to make sure all of
  597. * those extents are sent to disk but does not wait on them
  598. */
  599. int btrfs_write_marked_extents(struct btrfs_root *root,
  600. struct extent_io_tree *dirty_pages, int mark)
  601. {
  602. int err = 0;
  603. int werr = 0;
  604. struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
  605. struct extent_state *cached_state = NULL;
  606. u64 start = 0;
  607. u64 end;
  608. while (!find_first_extent_bit(dirty_pages, start, &start, &end,
  609. mark, &cached_state)) {
  610. convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
  611. mark, &cached_state, GFP_NOFS);
  612. cached_state = NULL;
  613. err = filemap_fdatawrite_range(mapping, start, end);
  614. if (err)
  615. werr = err;
  616. cond_resched();
  617. start = end + 1;
  618. }
  619. if (err)
  620. werr = err;
  621. return werr;
  622. }
  623. /*
  624. * when btree blocks are allocated, they have some corresponding bits set for
  625. * them in one of two extent_io trees. This is used to make sure all of
  626. * those extents are on disk for transaction or log commit. We wait
  627. * on all the pages and clear them from the dirty pages state tree
  628. */
  629. int btrfs_wait_marked_extents(struct btrfs_root *root,
  630. struct extent_io_tree *dirty_pages, int mark)
  631. {
  632. int err = 0;
  633. int werr = 0;
  634. struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
  635. struct extent_state *cached_state = NULL;
  636. u64 start = 0;
  637. u64 end;
  638. while (!find_first_extent_bit(dirty_pages, start, &start, &end,
  639. EXTENT_NEED_WAIT, &cached_state)) {
  640. clear_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
  641. 0, 0, &cached_state, GFP_NOFS);
  642. err = filemap_fdatawait_range(mapping, start, end);
  643. if (err)
  644. werr = err;
  645. cond_resched();
  646. start = end + 1;
  647. }
  648. if (err)
  649. werr = err;
  650. return werr;
  651. }
  652. /*
  653. * when btree blocks are allocated, they have some corresponding bits set for
  654. * them in one of two extent_io trees. This is used to make sure all of
  655. * those extents are on disk for transaction or log commit
  656. */
  657. int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
  658. struct extent_io_tree *dirty_pages, int mark)
  659. {
  660. int ret;
  661. int ret2;
  662. ret = btrfs_write_marked_extents(root, dirty_pages, mark);
  663. ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
  664. if (ret)
  665. return ret;
  666. if (ret2)
  667. return ret2;
  668. return 0;
  669. }
  670. int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
  671. struct btrfs_root *root)
  672. {
  673. if (!trans || !trans->transaction) {
  674. struct inode *btree_inode;
  675. btree_inode = root->fs_info->btree_inode;
  676. return filemap_write_and_wait(btree_inode->i_mapping);
  677. }
  678. return btrfs_write_and_wait_marked_extents(root,
  679. &trans->transaction->dirty_pages,
  680. EXTENT_DIRTY);
  681. }
  682. /*
  683. * this is used to update the root pointer in the tree of tree roots.
  684. *
  685. * But, in the case of the extent allocation tree, updating the root
  686. * pointer may allocate blocks which may change the root of the extent
  687. * allocation tree.
  688. *
  689. * So, this loops and repeats and makes sure the cowonly root didn't
  690. * change while the root pointer was being updated in the metadata.
  691. */
  692. static int update_cowonly_root(struct btrfs_trans_handle *trans,
  693. struct btrfs_root *root)
  694. {
  695. int ret;
  696. u64 old_root_bytenr;
  697. u64 old_root_used;
  698. struct btrfs_root *tree_root = root->fs_info->tree_root;
  699. old_root_used = btrfs_root_used(&root->root_item);
  700. btrfs_write_dirty_block_groups(trans, root);
  701. while (1) {
  702. old_root_bytenr = btrfs_root_bytenr(&root->root_item);
  703. if (old_root_bytenr == root->node->start &&
  704. old_root_used == btrfs_root_used(&root->root_item))
  705. break;
  706. btrfs_set_root_node(&root->root_item, root->node);
  707. ret = btrfs_update_root(trans, tree_root,
  708. &root->root_key,
  709. &root->root_item);
  710. if (ret)
  711. return ret;
  712. old_root_used = btrfs_root_used(&root->root_item);
  713. ret = btrfs_write_dirty_block_groups(trans, root);
  714. if (ret)
  715. return ret;
  716. }
  717. if (root != root->fs_info->extent_root)
  718. switch_commit_root(root);
  719. return 0;
  720. }
  721. /*
  722. * update all the cowonly tree roots on disk
  723. *
  724. * The error handling in this function may not be obvious. Any of the
  725. * failures will cause the file system to go offline. We still need
  726. * to clean up the delayed refs.
  727. */
  728. static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
  729. struct btrfs_root *root)
  730. {
  731. struct btrfs_fs_info *fs_info = root->fs_info;
  732. struct list_head *next;
  733. struct extent_buffer *eb;
  734. int ret;
  735. ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
  736. if (ret)
  737. return ret;
  738. eb = btrfs_lock_root_node(fs_info->tree_root);
  739. ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
  740. 0, &eb);
  741. btrfs_tree_unlock(eb);
  742. free_extent_buffer(eb);
  743. if (ret)
  744. return ret;
  745. ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
  746. if (ret)
  747. return ret;
  748. ret = btrfs_run_dev_stats(trans, root->fs_info);
  749. WARN_ON(ret);
  750. ret = btrfs_run_dev_replace(trans, root->fs_info);
  751. WARN_ON(ret);
  752. ret = btrfs_run_qgroups(trans, root->fs_info);
  753. BUG_ON(ret);
  754. /* run_qgroups might have added some more refs */
  755. ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
  756. BUG_ON(ret);
  757. while (!list_empty(&fs_info->dirty_cowonly_roots)) {
  758. next = fs_info->dirty_cowonly_roots.next;
  759. list_del_init(next);
  760. root = list_entry(next, struct btrfs_root, dirty_list);
  761. ret = update_cowonly_root(trans, root);
  762. if (ret)
  763. return ret;
  764. }
  765. down_write(&fs_info->extent_commit_sem);
  766. switch_commit_root(fs_info->extent_root);
  767. up_write(&fs_info->extent_commit_sem);
  768. btrfs_after_dev_replace_commit(fs_info);
  769. return 0;
  770. }
  771. /*
772. * dead roots are old snapshots that need to be deleted. This adds the
773. * given root into the list of dead roots that need to
  774. * be deleted
  775. */
  776. int btrfs_add_dead_root(struct btrfs_root *root)
  777. {
  778. spin_lock(&root->fs_info->trans_lock);
  779. list_add(&root->root_list, &root->fs_info->dead_roots);
  780. spin_unlock(&root->fs_info->trans_lock);
  781. return 0;
  782. }
  783. /*
784. * update all the fs (subvolume) tree roots on disk
  785. */
  786. static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
  787. struct btrfs_root *root)
  788. {
  789. struct btrfs_root *gang[8];
  790. struct btrfs_fs_info *fs_info = root->fs_info;
  791. int i;
  792. int ret;
  793. int err = 0;
  794. spin_lock(&fs_info->fs_roots_radix_lock);
  795. while (1) {
  796. ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
  797. (void **)gang, 0,
  798. ARRAY_SIZE(gang),
  799. BTRFS_ROOT_TRANS_TAG);
  800. if (ret == 0)
  801. break;
  802. for (i = 0; i < ret; i++) {
  803. root = gang[i];
  804. radix_tree_tag_clear(&fs_info->fs_roots_radix,
  805. (unsigned long)root->root_key.objectid,
  806. BTRFS_ROOT_TRANS_TAG);
  807. spin_unlock(&fs_info->fs_roots_radix_lock);
  808. btrfs_free_log(trans, root);
  809. btrfs_update_reloc_root(trans, root);
  810. btrfs_orphan_commit_root(trans, root);
  811. btrfs_save_ino_cache(root, trans);
  812. /* see comments in should_cow_block() */
  813. root->force_cow = 0;
  814. smp_wmb();
  815. if (root->commit_root != root->node) {
  816. mutex_lock(&root->fs_commit_mutex);
  817. switch_commit_root(root);
  818. btrfs_unpin_free_ino(root);
  819. mutex_unlock(&root->fs_commit_mutex);
  820. btrfs_set_root_node(&root->root_item,
  821. root->node);
  822. }
  823. err = btrfs_update_root(trans, fs_info->tree_root,
  824. &root->root_key,
  825. &root->root_item);
  826. spin_lock(&fs_info->fs_roots_radix_lock);
  827. if (err)
  828. break;
  829. }
  830. }
  831. spin_unlock(&fs_info->fs_roots_radix_lock);
  832. return err;
  833. }
  834. /*
  835. * defrag a given btree. If cacheonly == 1, this won't read from the disk,
  836. * otherwise every leaf in the btree is read and defragged.
  837. */
  838. int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
  839. {
  840. struct btrfs_fs_info *info = root->fs_info;
  841. struct btrfs_trans_handle *trans;
  842. int ret;
  843. if (xchg(&root->defrag_running, 1))
  844. return 0;
  845. while (1) {
  846. trans = btrfs_start_transaction(root, 0);
  847. if (IS_ERR(trans))
  848. return PTR_ERR(trans);
  849. ret = btrfs_defrag_leaves(trans, root, cacheonly);
  850. btrfs_end_transaction(trans, root);
  851. btrfs_btree_balance_dirty(info->tree_root);
  852. cond_resched();
  853. if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
  854. break;
  855. }
  856. root->defrag_running = 0;
  857. return ret;
  858. }
  859. /*
  860. * new snapshots need to be created at a very specific time in the
  861. * transaction commit. This does the actual creation
  862. */
  863. static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
  864. struct btrfs_fs_info *fs_info,
  865. struct btrfs_pending_snapshot *pending)
  866. {
  867. struct btrfs_key key;
  868. struct btrfs_root_item *new_root_item;
  869. struct btrfs_root *tree_root = fs_info->tree_root;
  870. struct btrfs_root *root = pending->root;
  871. struct btrfs_root *parent_root;
  872. struct btrfs_block_rsv *rsv;
  873. struct inode *parent_inode;
  874. struct btrfs_path *path;
  875. struct btrfs_dir_item *dir_item;
  876. struct dentry *parent;
  877. struct dentry *dentry;
  878. struct extent_buffer *tmp;
  879. struct extent_buffer *old;
  880. struct timespec cur_time = CURRENT_TIME;
  881. int ret;
  882. u64 to_reserve = 0;
  883. u64 index = 0;
  884. u64 objectid;
  885. u64 root_flags;
  886. uuid_le new_uuid;
  887. path = btrfs_alloc_path();
  888. if (!path) {
  889. ret = pending->error = -ENOMEM;
  890. goto path_alloc_fail;
  891. }
  892. new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
  893. if (!new_root_item) {
  894. ret = pending->error = -ENOMEM;
  895. goto root_item_alloc_fail;
  896. }
  897. ret = btrfs_find_free_objectid(tree_root, &objectid);
  898. if (ret) {
  899. pending->error = ret;
  900. goto no_free_objectid;
  901. }
  902. btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
  903. if (to_reserve > 0) {
  904. ret = btrfs_block_rsv_add(root, &pending->block_rsv,
  905. to_reserve,
  906. BTRFS_RESERVE_NO_FLUSH);
  907. if (ret) {
  908. pending->error = ret;
  909. goto no_free_objectid;
  910. }
  911. }
  912. ret = btrfs_qgroup_inherit(trans, fs_info, root->root_key.objectid,
  913. objectid, pending->inherit);
  914. if (ret) {
  915. pending->error = ret;
  916. goto no_free_objectid;
  917. }
  918. key.objectid = objectid;
  919. key.offset = (u64)-1;
  920. key.type = BTRFS_ROOT_ITEM_KEY;
  921. rsv = trans->block_rsv;
  922. trans->block_rsv = &pending->block_rsv;
  923. dentry = pending->dentry;
  924. parent = dget_parent(dentry);
  925. parent_inode = parent->d_inode;
  926. parent_root = BTRFS_I(parent_inode)->root;
  927. record_root_in_trans(trans, parent_root);
  928. /*
  929. * insert the directory item
  930. */
  931. ret = btrfs_set_inode_index(parent_inode, &index);
  932. BUG_ON(ret); /* -ENOMEM */
  933. /* check if there is a file/dir which has the same name. */
  934. dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
  935. btrfs_ino(parent_inode),
  936. dentry->d_name.name,
  937. dentry->d_name.len, 0);
  938. if (dir_item != NULL && !IS_ERR(dir_item)) {
  939. pending->error = -EEXIST;
  940. goto fail;
  941. } else if (IS_ERR(dir_item)) {
  942. ret = PTR_ERR(dir_item);
  943. btrfs_abort_transaction(trans, root, ret);
  944. goto fail;
  945. }
  946. btrfs_release_path(path);
  947. /*
  948. * pull in the delayed directory update
  949. * and the delayed inode item
  950. * otherwise we corrupt the FS during
  951. * snapshot
  952. */
  953. ret = btrfs_run_delayed_items(trans, root);
  954. if (ret) { /* Transaction aborted */
  955. btrfs_abort_transaction(trans, root, ret);
  956. goto fail;
  957. }
  958. record_root_in_trans(trans, root);
  959. btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
  960. memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
  961. btrfs_check_and_init_root_item(new_root_item);
  962. root_flags = btrfs_root_flags(new_root_item);
  963. if (pending->readonly)
  964. root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
  965. else
  966. root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
  967. btrfs_set_root_flags(new_root_item, root_flags);
  968. btrfs_set_root_generation_v2(new_root_item,
  969. trans->transid);
  970. uuid_le_gen(&new_uuid);
  971. memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
  972. memcpy(new_root_item->parent_uuid, root->root_item.uuid,
  973. BTRFS_UUID_SIZE);
  974. new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec);
  975. new_root_item->otime.nsec = cpu_to_le32(cur_time.tv_nsec);
  976. btrfs_set_root_otransid(new_root_item, trans->transid);
  977. memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
  978. memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
  979. btrfs_set_root_stransid(new_root_item, 0);
  980. btrfs_set_root_rtransid(new_root_item, 0);
  981. old = btrfs_lock_root_node(root);
  982. ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
  983. if (ret) {
  984. btrfs_tree_unlock(old);
  985. free_extent_buffer(old);
  986. btrfs_abort_transaction(trans, root, ret);
  987. goto fail;
  988. }
  989. btrfs_set_lock_blocking(old);
  990. ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
  991. /* clean up in any case */
  992. btrfs_tree_unlock(old);
  993. free_extent_buffer(old);
  994. if (ret) {
  995. btrfs_abort_transaction(trans, root, ret);
  996. goto fail;
  997. }
  998. /* see comments in should_cow_block() */
  999. root->force_cow = 1;
  1000. smp_wmb();
  1001. btrfs_set_root_node(new_root_item, tmp);
  1002. /* record when the snapshot was created in key.offset */
  1003. key.offset = trans->transid;
  1004. ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
  1005. btrfs_tree_unlock(tmp);
  1006. free_extent_buffer(tmp);
  1007. if (ret) {
  1008. btrfs_abort_transaction(trans, root, ret);
  1009. goto fail;
  1010. }
  1011. /*
  1012. * insert root back/forward references
  1013. */
  1014. ret = btrfs_add_root_ref(trans, tree_root, objectid,
  1015. parent_root->root_key.objectid,
  1016. btrfs_ino(parent_inode), index,
  1017. dentry->d_name.name, dentry->d_name.len);
  1018. if (ret) {
  1019. btrfs_abort_transaction(trans, root, ret);
  1020. goto fail;
  1021. }
  1022. key.offset = (u64)-1;
  1023. pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
  1024. if (IS_ERR(pending->snap)) {
  1025. ret = PTR_ERR(pending->snap);
  1026. btrfs_abort_transaction(trans, root, ret);
  1027. goto fail;
  1028. }
  1029. ret = btrfs_reloc_post_snapshot(trans, pending);
  1030. if (ret) {
  1031. btrfs_abort_transaction(trans, root, ret);
  1032. goto fail;
  1033. }
  1034. ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
  1035. if (ret) {
  1036. btrfs_abort_transaction(trans, root, ret);
  1037. goto fail;
  1038. }
  1039. ret = btrfs_insert_dir_item(trans, parent_root,
  1040. dentry->d_name.name, dentry->d_name.len,
  1041. parent_inode, &key,
  1042. BTRFS_FT_DIR, index);
1043. /* We checked the name at the beginning, so a duplicate here is impossible. */
  1044. BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
  1045. if (ret) {
  1046. btrfs_abort_transaction(trans, root, ret);
  1047. goto fail;
  1048. }
  1049. btrfs_i_size_write(parent_inode, parent_inode->i_size +
  1050. dentry->d_name.len * 2);
  1051. parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
  1052. ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
  1053. if (ret)
  1054. btrfs_abort_transaction(trans, root, ret);
  1055. fail:
  1056. dput(parent);
  1057. trans->block_rsv = rsv;
  1058. no_free_objectid:
  1059. kfree(new_root_item);
  1060. root_item_alloc_fail:
  1061. btrfs_free_path(path);
  1062. path_alloc_fail:
  1063. btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
  1064. return ret;
  1065. }
  1066. /*
  1067. * create all the snapshots we've scheduled for creation
  1068. */
  1069. static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
  1070. struct btrfs_fs_info *fs_info)
  1071. {
  1072. struct btrfs_pending_snapshot *pending;
  1073. struct list_head *head = &trans->transaction->pending_snapshots;
  1074. list_for_each_entry(pending, head, list)
  1075. create_pending_snapshot(trans, fs_info, pending);
  1076. return 0;
  1077. }
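/*
 * copy the latest tree root and chunk root pointers from their root items
 * into the in-memory super block so they go out with the next super write
 */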
  1078. static void update_super_roots(struct btrfs_root *root)
  1079. {
  1080. struct btrfs_root_item *root_item;
  1081. struct btrfs_super_block *super;
  1082. super = root->fs_info->super_copy;
  1083. root_item = &root->fs_info->chunk_root->root_item;
  1084. super->chunk_root = root_item->bytenr;
  1085. super->chunk_root_generation = root_item->generation;
  1086. super->chunk_root_level = root_item->level;
  1087. root_item = &root->fs_info->tree_root->root_item;
  1088. super->root = root_item->bytenr;
  1089. super->generation = root_item->generation;
  1090. super->root_level = root_item->level;
  1091. if (btrfs_test_opt(root, SPACE_CACHE))
  1092. super->cache_generation = root_item->generation;
  1093. }
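/* report whether the currently running transaction has entered commit */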
  1094. int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
  1095. {
  1096. int ret = 0;
  1097. spin_lock(&info->trans_lock);
  1098. if (info->running_transaction)
  1099. ret = info->running_transaction->in_commit;
  1100. spin_unlock(&info->trans_lock);
  1101. return ret;
  1102. }
  1103. int btrfs_transaction_blocked(struct btrfs_fs_info *info)
  1104. {
  1105. int ret = 0;
  1106. spin_lock(&info->trans_lock);
  1107. if (info->running_transaction)
  1108. ret = info->running_transaction->blocked;
  1109. spin_unlock(&info->trans_lock);
  1110. return ret;
  1111. }
  1112. /*
  1113. * wait for the current transaction commit to start and block subsequent
  1114. * transaction joins
  1115. */
  1116. static void wait_current_trans_commit_start(struct btrfs_root *root,
  1117. struct btrfs_transaction *trans)
  1118. {
  1119. wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
  1120. }
  1121. /*
  1122. * wait for the current transaction to start and then become unblocked.
  1123. * caller holds ref.
  1124. */
  1125. static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
  1126. struct btrfs_transaction *trans)
  1127. {
  1128. wait_event(root->fs_info->transaction_wait,
  1129. trans->commit_done || (trans->in_commit && !trans->blocked));
  1130. }
  1131. /*
  1132. * commit transactions asynchronously. once btrfs_commit_transaction_async
  1133. * returns, any subsequent transaction will not be allowed to join.
  1134. */
  1135. struct btrfs_async_commit {
  1136. struct btrfs_trans_handle *newtrans;
  1137. struct btrfs_root *root;
  1138. struct delayed_work work;
  1139. };
  1140. static void do_async_commit(struct work_struct *work)
  1141. {
  1142. struct btrfs_async_commit *ac =
  1143. container_of(work, struct btrfs_async_commit, work.work);
  1144. /*
  1145. * We've got freeze protection passed with the transaction.
  1146. * Tell lockdep about it.
  1147. */
  1148. if (ac->newtrans->type < TRANS_JOIN_NOLOCK)
  1149. rwsem_acquire_read(
  1150. &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
  1151. 0, 1, _THIS_IP_);
  1152. current->journal_info = ac->newtrans;
  1153. btrfs_commit_transaction(ac->newtrans, ac->root);
  1154. kfree(ac);
  1155. }
  1156. int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
  1157. struct btrfs_root *root,
  1158. int wait_for_unblock)
  1159. {
  1160. struct btrfs_async_commit *ac;
  1161. struct btrfs_transaction *cur_trans;
  1162. ac = kmalloc(sizeof(*ac), GFP_NOFS);
  1163. if (!ac)
  1164. return -ENOMEM;
  1165. INIT_DELAYED_WORK(&ac->work, do_async_commit);
  1166. ac->root = root;
  1167. ac->newtrans = btrfs_join_transaction(root);
  1168. if (IS_ERR(ac->newtrans)) {
  1169. int err = PTR_ERR(ac->newtrans);
  1170. kfree(ac);
  1171. return err;
  1172. }
  1173. /* take transaction reference */
  1174. cur_trans = trans->transaction;
  1175. atomic_inc(&cur_trans->use_count);
  1176. btrfs_end_transaction(trans, root);
  1177. /*
  1178. * Tell lockdep we've released the freeze rwsem, since the
  1179. * async commit thread will be the one to unlock it.
  1180. */
  1181. if (trans->type < TRANS_JOIN_NOLOCK)
  1182. rwsem_release(
  1183. &root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
  1184. 1, _THIS_IP_);
  1185. schedule_delayed_work(&ac->work, 0);
  1186. /* wait for transaction to start and unblock */
  1187. if (wait_for_unblock)
  1188. wait_current_trans_commit_start_and_unblock(root, cur_trans);
  1189. else
  1190. wait_current_trans_commit_start(root, cur_trans);
  1191. if (current->journal_info == trans)
  1192. current->journal_info = NULL;
  1193. put_transaction(cur_trans);
  1194. return 0;
  1195. }
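/*
 * called when a commit fails: abort the transaction, unhook it from the
 * fs_info lists and drop the references the commit path was holding
 */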
  1196. static void cleanup_transaction(struct btrfs_trans_handle *trans,
  1197. struct btrfs_root *root, int err)
  1198. {
  1199. struct btrfs_transaction *cur_trans = trans->transaction;
  1200. WARN_ON(trans->use_count > 1);
  1201. btrfs_abort_transaction(trans, root, err);
  1202. spin_lock(&root->fs_info->trans_lock);
  1203. list_del_init(&cur_trans->list);
  1204. if (cur_trans == root->fs_info->running_transaction) {
  1205. root->fs_info->running_transaction = NULL;
  1206. root->fs_info->trans_no_join = 0;
  1207. }
  1208. spin_unlock(&root->fs_info->trans_lock);
  1209. btrfs_cleanup_one_transaction(trans->transaction, root);
  1210. put_transaction(cur_trans);
  1211. put_transaction(cur_trans);
  1212. trace_btrfs_transaction_commit(root);
  1213. btrfs_scrub_continue(root);
  1214. if (current->journal_info == trans)
  1215. current->journal_info = NULL;
  1216. kmem_cache_free(btrfs_trans_handle_cachep, trans);
  1217. }
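/*
 * push out delalloc and ordered extents when required and run the delayed
 * items so the commit code sees a consistent set of trees
 */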
  1218. static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
  1219. struct btrfs_root *root)
  1220. {
  1221. int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);
  1222. int snap_pending = 0;
  1223. int ret;
  1224. if (!flush_on_commit) {
  1225. spin_lock(&root->fs_info->trans_lock);
  1226. if (!list_empty(&trans->transaction->pending_snapshots))
  1227. snap_pending = 1;
  1228. spin_unlock(&root->fs_info->trans_lock);
  1229. }
  1230. if (flush_on_commit || snap_pending) {
  1231. btrfs_start_delalloc_inodes(root, 1);
  1232. btrfs_wait_ordered_extents(root, 1);
  1233. }
  1234. ret = btrfs_run_delayed_items(trans, root);
  1235. if (ret)
  1236. return ret;
  1237. /*
  1238. * running the delayed items may have added new refs. account
  1239. * them now so that they hinder processing of more delayed refs
  1240. * as little as possible.
  1241. */
  1242. btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
  1243. /*
1244. * rename doesn't use btrfs_join_transaction, so once we
  1245. * set the transaction to blocked above, we aren't going
  1246. * to get any new ordered operations. We can safely run
1247. * it here and know for sure that nothing new will be added
  1248. * to the list
  1249. */
  1250. btrfs_run_ordered_operations(root, 1);
  1251. return 0;
  1252. }
  1253. /*
  1254. * btrfs_transaction state sequence:
  1255. * in_commit = 0, blocked = 0 (initial)
  1256. * in_commit = 1, blocked = 1
  1257. * blocked = 0
  1258. * commit_done = 1
  1259. */
  1260. int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
  1261. struct btrfs_root *root)
  1262. {
  1263. unsigned long joined = 0;
  1264. struct btrfs_transaction *cur_trans = trans->transaction;
  1265. struct btrfs_transaction *prev_trans = NULL;
  1266. DEFINE_WAIT(wait);
  1267. int ret;
  1268. int should_grow = 0;
  1269. unsigned long now = get_seconds();
  1270. ret = btrfs_run_ordered_operations(root, 0);
  1271. if (ret) {
  1272. btrfs_abort_transaction(trans, root, ret);
  1273. goto cleanup_transaction;
  1274. }
  1275. /* Stop the commit early if ->aborted is set */
  1276. if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
  1277. ret = cur_trans->aborted;
  1278. goto cleanup_transaction;
  1279. }
  1280. /* make a pass through all the delayed refs we have so far
1281. * any running procs may add more while we are here
  1282. */
  1283. ret = btrfs_run_delayed_refs(trans, root, 0);
  1284. if (ret)
  1285. goto cleanup_transaction;
  1286. btrfs_trans_release_metadata(trans, root);
  1287. trans->block_rsv = NULL;
  1288. cur_trans = trans->transaction;
  1289. /*
  1290. * set the flushing flag so procs in this transaction have to
  1291. * start sending their work down.
  1292. */
  1293. cur_trans->delayed_refs.flushing = 1;
  1294. if (!list_empty(&trans->new_bgs))
  1295. btrfs_create_pending_block_groups(trans, root);
  1296. ret = btrfs_run_delayed_refs(trans, root, 0);
  1297. if (ret)
  1298. goto cleanup_transaction;
  1299. spin_lock(&cur_trans->commit_lock);
  1300. if (cur_trans->in_commit) {
  1301. spin_unlock(&cur_trans->commit_lock);
  1302. atomic_inc(&cur_trans->use_count);
  1303. ret = btrfs_end_transaction(trans, root);
  1304. wait_for_commit(root, cur_trans);
  1305. put_transaction(cur_trans);
  1306. return ret;
  1307. }
  1308. trans->transaction->in_commit = 1;
  1309. trans->transaction->blocked = 1;
  1310. spin_unlock(&cur_trans->commit_lock);
  1311. wake_up(&root->fs_info->transaction_blocked_wait);
  1312. spin_lock(&root->fs_info->trans_lock);
  1313. if (cur_trans->list.prev != &root->fs_info->trans_list) {
  1314. prev_trans = list_entry(cur_trans->list.prev,
  1315. struct btrfs_transaction, list);
  1316. if (!prev_trans->commit_done) {
  1317. atomic_inc(&prev_trans->use_count);
  1318. spin_unlock(&root->fs_info->trans_lock);
  1319. wait_for_commit(root, prev_trans);
  1320. put_transaction(prev_trans);
  1321. } else {
  1322. spin_unlock(&root->fs_info->trans_lock);
  1323. }
  1324. } else {
  1325. spin_unlock(&root->fs_info->trans_lock);
  1326. }
  1327. if (!btrfs_test_opt(root, SSD) &&
  1328. (now < cur_trans->start_time || now - cur_trans->start_time < 1))
  1329. should_grow = 1;
  1330. do {
  1331. joined = cur_trans->num_joined;
  1332. WARN_ON(cur_trans != trans->transaction);
  1333. ret = btrfs_flush_all_pending_stuffs(trans, root);
  1334. if (ret)
  1335. goto cleanup_transaction;
  1336. prepare_to_wait(&cur_trans->writer_wait, &wait,
  1337. TASK_UNINTERRUPTIBLE);
  1338. if (atomic_read(&cur_trans->num_writers) > 1)
  1339. schedule_timeout(MAX_SCHEDULE_TIMEOUT);
  1340. else if (should_grow)
  1341. schedule_timeout(1);
  1342. finish_wait(&cur_trans->writer_wait, &wait);
  1343. } while (atomic_read(&cur_trans->num_writers) > 1 ||
  1344. (should_grow && cur_trans->num_joined != joined));
  1345. ret = btrfs_flush_all_pending_stuffs(trans, root);
  1346. if (ret)
  1347. goto cleanup_transaction;
  1348. /*
  1349. * Ok now we need to make sure to block out any other joins while we
  1350. * commit the transaction. We could have started a join before setting
1351. * no_join, so make sure to wait for num_writers to drop to 1 again.
  1352. */
  1353. spin_lock(&root->fs_info->trans_lock);
  1354. root->fs_info->trans_no_join = 1;
  1355. spin_unlock(&root->fs_info->trans_lock);
  1356. wait_event(cur_trans->writer_wait,
  1357. atomic_read(&cur_trans->num_writers) == 1);
  1358. /* ->aborted might be set after the previous check, so check it */
  1359. if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
  1360. ret = cur_trans->aborted;
  1361. goto cleanup_transaction;
  1362. }
  1363. /*
  1364. * the reloc mutex makes sure that we stop
  1365. * the balancing code from coming in and moving
  1366. * extents around in the middle of the commit
  1367. */
  1368. mutex_lock(&root->fs_info->reloc_mutex);
  1369. /*
  1370. * We needn't worry about the delayed items because we will
  1371. * deal with them in create_pending_snapshot(), which is the
  1372. * core function of the snapshot creation.
  1373. */
  1374. ret = create_pending_snapshots(trans, root->fs_info);
  1375. if (ret) {
  1376. mutex_unlock(&root->fs_info->reloc_mutex);
  1377. goto cleanup_transaction;
  1378. }
  1379. /*
  1380. * We insert the dir indexes of the snapshots and update the inode
  1381. * of the snapshots' parents after the snapshot creation, so there
  1382. * are some delayed items which are not dealt with. Now deal with
  1383. * them.
  1384. *
  1385. * We needn't worry that this operation will corrupt the snapshots,
1386. * because all the trees which are snapshotted will be forced to COW
  1387. * the nodes and leaves.
  1388. */
  1389. ret = btrfs_run_delayed_items(trans, root);
  1390. if (ret) {
  1391. mutex_unlock(&root->fs_info->reloc_mutex);
  1392. goto cleanup_transaction;
  1393. }
  1394. ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
  1395. if (ret) {
  1396. mutex_unlock(&root->fs_info->reloc_mutex);
  1397. goto cleanup_transaction;
  1398. }
  1399. /*
  1400. * make sure none of the code above managed to slip in a
  1401. * delayed item
  1402. */
  1403. btrfs_assert_delayed_root_empty(root);
  1404. WARN_ON(cur_trans != trans->transaction);
  1405. btrfs_scrub_pause(root);
1406. /* commit_fs_roots and commit_cowonly_roots below are responsible for getting the
  1407. * various roots consistent with each other. Every pointer
  1408. * in the tree of tree roots has to point to the most up to date
  1409. * root for every subvolume and other tree. So, we have to keep
  1410. * the tree logging code from jumping in and changing any
  1411. * of the trees.
  1412. *
  1413. * At this point in the commit, there can't be any tree-log
  1414. * writers, but a little lower down we drop the trans mutex
  1415. * and let new people in. By holding the tree_log_mutex
  1416. * from now until after the super is written, we avoid races
  1417. * with the tree-log code.
  1418. */
  1419. mutex_lock(&root->fs_info->tree_log_mutex);
  1420. ret = commit_fs_roots(trans, root);
  1421. if (ret) {
  1422. mutex_unlock(&root->fs_info->tree_log_mutex);
  1423. mutex_unlock(&root->fs_info->reloc_mutex);
  1424. goto cleanup_transaction;
  1425. }
  1426. /* commit_fs_roots gets rid of all the tree log roots, it is now
  1427. * safe to free the root of tree log roots
  1428. */
  1429. btrfs_free_log_root_tree(trans, root->fs_info);
  1430. ret = commit_cowonly_roots(trans, root);
  1431. if (ret) {
  1432. mutex_unlock(&root->fs_info->tree_log_mutex);
  1433. mutex_unlock(&root->fs_info->reloc_mutex);
  1434. goto cleanup_transaction;
  1435. }
  1436. /*
  1437. * The tasks which save the space cache and inode cache may also
  1438. * update ->aborted, check it.
  1439. */
  1440. if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
  1441. ret = cur_trans->aborted;
  1442. mutex_unlock(&root->fs_info->tree_log_mutex);
  1443. mutex_unlock(&root->fs_info->reloc_mutex);
  1444. goto cleanup_transaction;
  1445. }
  1446. btrfs_prepare_extent_commit(trans, root);
  1447. cur_trans = root->fs_info->running_transaction;
  1448. btrfs_set_root_node(&root->fs_info->tree_root->root_item,
  1449. root->fs_info->tree_root->node);
  1450. switch_commit_root(root->fs_info->tree_root);
  1451. btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
  1452. root->fs_info->chunk_root->node);
  1453. switch_commit_root(root->fs_info->chunk_root);
  1454. assert_qgroups_uptodate(trans);
  1455. update_super_roots(root);
  1456. if (!root->fs_info->log_root_recovering) {
  1457. btrfs_set_super_log_root(root->fs_info->super_copy, 0);
  1458. btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
  1459. }
  1460. memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
  1461. sizeof(*root->fs_info->super_copy));
  1462. trans->transaction->blocked = 0;
  1463. spin_lock(&root->fs_info->trans_lock);
  1464. root->fs_info->running_transaction = NULL;
  1465. root->fs_info->trans_no_join = 0;
  1466. spin_unlock(&root->fs_info->trans_lock);
  1467. mutex_unlock(&root->fs_info->reloc_mutex);
  1468. wake_up(&root->fs_info->transaction_wait);
  1469. ret = btrfs_write_and_wait_transaction(trans, root);
  1470. if (ret) {
  1471. btrfs_error(root->fs_info, ret,
  1472. "Error while writing out transaction.");
  1473. mutex_unlock(&root->fs_info->tree_log_mutex);
  1474. goto cleanup_transaction;
  1475. }
  1476. ret = write_ctree_super(trans, root, 0);
  1477. if (ret) {
  1478. mutex_unlock(&root->fs_info->tree_log_mutex);
  1479. goto cleanup_transaction;
  1480. }
  1481. /*
  1482. * the super is written, we can safely allow the tree-loggers
  1483. * to go about their business
  1484. */
  1485. mutex_unlock(&root->fs_info->tree_log_mutex);
  1486. btrfs_finish_extent_commit(trans, root);
  1487. cur_trans->commit_done = 1;
  1488. root->fs_info->last_trans_committed = cur_trans->transid;
  1489. wake_up(&cur_trans->commit_wait);
  1490. spin_lock(&root->fs_info->trans_lock);
  1491. list_del_init(&cur_trans->list);
  1492. spin_unlock(&root->fs_info->trans_lock);
  1493. put_transaction(cur_trans);
  1494. put_transaction(cur_trans);
  1495. if (trans->type < TRANS_JOIN_NOLOCK)
  1496. sb_end_intwrite(root->fs_info->sb);
  1497. trace_btrfs_transaction_commit(root);
  1498. btrfs_scrub_continue(root);
  1499. if (current->journal_info == trans)
  1500. current->journal_info = NULL;
  1501. kmem_cache_free(btrfs_trans_handle_cachep, trans);
  1502. if (current != root->fs_info->transaction_kthread)
  1503. btrfs_run_delayed_iputs(root);
  1504. return ret;
  1505. cleanup_transaction:
  1506. btrfs_trans_release_metadata(trans, root);
  1507. trans->block_rsv = NULL;
  1508. btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
  1509. // WARN_ON(1);
  1510. if (current->journal_info == trans)
  1511. current->journal_info = NULL;
  1512. cleanup_transaction(trans, root, ret);
  1513. return ret;
  1514. }
  1515. /*
  1516. * interface function to delete all the snapshots we have scheduled for deletion
  1517. */
  1518. int btrfs_clean_old_snapshots(struct btrfs_root *root)
  1519. {
  1520. LIST_HEAD(list);
  1521. struct btrfs_fs_info *fs_info = root->fs_info;
  1522. spin_lock(&fs_info->trans_lock);
  1523. list_splice_init(&fs_info->dead_roots, &list);
  1524. spin_unlock(&fs_info->trans_lock);
  1525. while (!list_empty(&list)) {
  1526. int ret;
  1527. root = list_entry(list.next, struct btrfs_root, root_list);
  1528. list_del(&root->root_list);
  1529. btrfs_kill_all_delayed_nodes(root);
  1530. if (btrfs_header_backref_rev(root->node) <
  1531. BTRFS_MIXED_BACKREF_REV)
  1532. ret = btrfs_drop_snapshot(root, NULL, 0, 0);
  1533. else
1534. ret = btrfs_drop_snapshot(root, NULL, 1, 0);
  1535. BUG_ON(ret < 0);
  1536. }
  1537. return 0;
  1538. }