/*
 * linux/fs/jbd2/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
/*
 * Default IO end handler for temporary BJ_IO buffer_heads.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	BUFFER_TRACE(bh, "");
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
}
/*
 * When an ext3-ordered file is truncated, it is possible that many pages are
 * not successfully freed, because they are attached to a committing
 * transaction.  After the transaction commits, these pages are left on the
 * LRU, with no ->mapping, and with attached buffers.  These pages are
 * trivially reclaimable by the VM, but their apparent absence upsets the VM
 * accounting, and it makes the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under lock_journal(), and possibly under journal_datalist_lock.  The
 * caller provided us with a ref against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page;

	if (buffer_dirty(bh))
		goto nope;
	if (atomic_read(&bh->b_count) != 1)
		goto nope;
	page = bh->b_page;
	if (!page)
		goto nope;
	if (page->mapping)
		goto nope;

	/* OK, it's a truncated page */
	if (TestSetPageLocked(page))
		goto nope;

	page_cache_get(page);
	__brelse(bh);
	try_to_free_buffers(page);
	unlock_page(page);
	page_cache_release(page);
	return;

nope:
	__brelse(bh);
}
/*
 * Try to acquire jbd_lock_bh_state() against the buffer, when j_list_lock is
 * held.  For ranking reasons we must trylock.  If we lose, schedule away and
 * return 0.  j_list_lock is dropped in this case.
 */
static int inverted_lock(journal_t *journal, struct buffer_head *bh)
{
	if (!jbd_trylock_bh_state(bh)) {
		spin_unlock(&journal->j_list_lock);
		schedule();
		return 0;
	}
	return 1;
}
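
/*
 * An illustrative sketch of how the scan loops below use inverted_lock()
 * (the real call sites add reference counting and state checks of their
 * own).  On failure, j_list_lock has already been dropped, so the caller
 * must retake it before touching the list again:
 *
 *	spin_lock(&journal->j_list_lock);
 *	while (scanning the list) {
 *		...
 *		if (!inverted_lock(journal, bh)) {
 *			spin_lock(&journal->j_list_lock);
 *			continue;
 *		}
 *		... both j_list_lock and the bh_state lock are now held ...
 *		jbd_unlock_bh_state(bh);
 *	}
 */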
/*
 * Done it all: now write the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_write_commit_record(journal_t *journal,
					transaction_t *commit_transaction)
{
	struct journal_head *descriptor;
	struct buffer_head *bh;
	int i, ret;
	int barrier_done = 0;

	if (is_journal_aborted(journal))
		return 0;

	descriptor = jbd2_journal_get_descriptor_buffer(journal);
	if (!descriptor)
		return 1;

	bh = jh2bh(descriptor);

	/* Stamp a commit header into each 512-byte sector of the block */
	for (i = 0; i < bh->b_size; i += 512) {
		journal_header_t *tmp = (journal_header_t *)(bh->b_data + i);

		tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
		tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
		tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
	}

	JBUFFER_TRACE(descriptor, "write commit block");
	set_buffer_dirty(bh);
	if (journal->j_flags & JBD2_BARRIER) {
		set_buffer_ordered(bh);
		barrier_done = 1;
	}
	ret = sync_dirty_buffer(bh);
	/* is it possible for another commit to fail at roughly
	 * the same time as this one?  If so, we don't want to
	 * trust the barrier flag in the super, but instead want
	 * to remember if we sent a barrier request
	 */
	if (ret == -EOPNOTSUPP && barrier_done) {
		char b[BDEVNAME_SIZE];

		printk(KERN_WARNING
			"JBD: barrier-based sync failed on %s - "
			"disabling barriers\n",
			bdevname(journal->j_dev, b));
		spin_lock(&journal->j_state_lock);
		journal->j_flags &= ~JBD2_BARRIER;
		spin_unlock(&journal->j_state_lock);

		/* And try again, without the barrier */
		clear_buffer_ordered(bh);
		set_buffer_uptodate(bh);
		set_buffer_dirty(bh);
		ret = sync_dirty_buffer(bh);
	}
	put_bh(bh);		/* One for getblk() */
	jbd2_journal_put_journal_head(descriptor);

	return (ret == -EIO);
}
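
/*
 * For reference, the commit record above is built entirely from standard
 * journal headers (one per 512-byte sector); every field is big-endian.
 * A sketch of the header as used in this file (see include/linux/jbd2.h
 * for the authoritative definition):
 *
 *	typedef struct journal_header_s {
 *		__be32 h_magic;		JBD2_MAGIC_NUMBER
 *		__be32 h_blocktype;	JBD2_COMMIT_BLOCK here
 *		__be32 h_sequence;	tid of the committing transaction
 *	} journal_header_t;
 */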
static void journal_do_submit_data(struct buffer_head **wbuf, int bufs)
{
	int i;

	for (i = 0; i < bufs; i++) {
		wbuf[i]->b_end_io = end_buffer_write_sync;
		/* We use-up our safety reference in submit_bh() */
		submit_bh(WRITE, wbuf[i]);
	}
}
/*
 * Submit all the data buffers to disk
 */
static void journal_submit_data_buffers(journal_t *journal,
					transaction_t *commit_transaction)
{
	struct journal_head *jh;
	struct buffer_head *bh;
	int locked;
	int bufs = 0;
	struct buffer_head **wbuf = journal->j_wbuf;

	/*
	 * Whenever we unlock the journal and sleep, things can get added
	 * onto ->t_sync_datalist, so we have to keep looping back to
	 * write_out_data until we *know* that the list is empty.
	 *
	 * Clean up any flushed data buffers from the data list.  Even in
	 * abort mode, we want to flush this out as soon as possible.
	 */
write_out_data:
	cond_resched();
	spin_lock(&journal->j_list_lock);

	while (commit_transaction->t_sync_datalist) {
		jh = commit_transaction->t_sync_datalist;
		bh = jh2bh(jh);
		locked = 0;

		/* Get reference just to make sure buffer does not disappear
		 * when we are forced to drop various locks */
		get_bh(bh);
		/* If the buffer is dirty, we need to submit IO and hence
		 * we need the buffer lock.  We try to lock the buffer without
		 * blocking.  If we fail, we need to drop j_list_lock and do
		 * blocking lock_buffer().
		 */
		if (buffer_dirty(bh)) {
			if (test_set_buffer_locked(bh)) {
				BUFFER_TRACE(bh, "needs blocking lock");
				spin_unlock(&journal->j_list_lock);
				/* Write out all data to prevent deadlocks */
				journal_do_submit_data(wbuf, bufs);
				bufs = 0;
				lock_buffer(bh);
				spin_lock(&journal->j_list_lock);
			}
			locked = 1;
		}
		/* We have to get bh_state lock.  Again out of order, sigh. */
		if (!inverted_lock(journal, bh)) {
			jbd_lock_bh_state(bh);
			spin_lock(&journal->j_list_lock);
		}
		/* Someone already cleaned up the buffer? */
		if (!buffer_jbd(bh)
			|| jh->b_transaction != commit_transaction
			|| jh->b_jlist != BJ_SyncData) {
			jbd_unlock_bh_state(bh);
			if (locked)
				unlock_buffer(bh);
			BUFFER_TRACE(bh, "already cleaned up");
			put_bh(bh);
			continue;
		}
		if (locked && test_clear_buffer_dirty(bh)) {
			BUFFER_TRACE(bh, "needs writeout, adding to array");
			wbuf[bufs++] = bh;
			__jbd2_journal_file_buffer(jh, commit_transaction,
						BJ_Locked);
			jbd_unlock_bh_state(bh);
			if (bufs == journal->j_wbufsize) {
				spin_unlock(&journal->j_list_lock);
				journal_do_submit_data(wbuf, bufs);
				bufs = 0;
				goto write_out_data;
			}
		} else {
			BUFFER_TRACE(bh, "writeout complete: unfile");
			__jbd2_journal_unfile_buffer(jh);
			jbd_unlock_bh_state(bh);
			if (locked)
				unlock_buffer(bh);
			jbd2_journal_remove_journal_head(bh);
			/* Once for our safety reference, once for
			 * jbd2_journal_remove_journal_head() */
			put_bh(bh);
			put_bh(bh);
		}

		if (lock_need_resched(&journal->j_list_lock)) {
			spin_unlock(&journal->j_list_lock);
			goto write_out_data;
		}
	}
	spin_unlock(&journal->j_list_lock);
	journal_do_submit_data(wbuf, bufs);
}
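
/*
 * To summarise the loop above, each buffer on t_sync_datalist falls into
 * one of three cases:
 *
 *	- dirty: lock it, clear the dirty bit, queue it in wbuf[] and move
 *	  it to BJ_Locked so the writeback-wait loop in the commit path can
 *	  find it;
 *	- clean (writeout already complete): unfile it and drop our
 *	  references;
 *	- already cleaned up by someone else: just drop our reference.
 *
 * wbuf[] is flushed whenever it fills or whenever a lock must be dropped,
 * so no buffer is held locked while we sleep.
 */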
static inline void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
					sector_t block)
{
	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
	if (tag_bytes > JBD_TAG_SIZE32)
		/*
		 * Shift in two steps: a single ">> 32" would be undefined
		 * when sector_t is only 32 bits wide.
		 */
		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}
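
/*
 * A sketch of the on-disk tag this helper fills in (see journal_block_tag_t
 * in include/linux/jbd2.h for the authoritative layout).  When the journal
 * has the 64-bit incompat feature set, journal_tag_bytes() returns a size
 * larger than JBD_TAG_SIZE32 and the high half of the block number is
 * stored as well; otherwise only the low 32 bits are written:
 *
 *	__be32 t_blocknr;	low 32 bits of the on-disk block number
 *	__be32 t_flags;		JBD2_FLAG_* bits, set by the caller
 *	__be32 t_blocknr_high;	high 32 bits (64-bit journals only)
 */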
/*
 * jbd2_journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
void jbd2_journal_commit_transaction(journal_t *journal)
{
	transaction_t *commit_transaction;
	struct journal_head *jh, *new_jh, *descriptor;
	struct buffer_head **wbuf = journal->j_wbuf;
	int bufs;
	int flags;
	int err;
	unsigned long blocknr;
	char *tagp = NULL;
	journal_header_t *header;
	journal_block_tag_t *tag = NULL;
	int space_left = 0;
	int first_tag = 0;
	int tag_flag;
	int i;
	int tag_bytes = journal_tag_bytes(journal);

	/*
	 * First job: lock down the current transaction and wait for
	 * all outstanding updates to complete.
	 */

#ifdef COMMIT_STATS
	spin_lock(&journal->j_list_lock);
	summarise_journal_usage(journal);
	spin_unlock(&journal->j_list_lock);
#endif
	/* Do we need to erase the effects of a prior jbd2_journal_flush? */
	if (journal->j_flags & JBD2_FLUSHED) {
		jbd_debug(3, "super block updated\n");
		jbd2_journal_update_superblock(journal, 1);
	} else {
		jbd_debug(3, "superblock not updated\n");
	}

	J_ASSERT(journal->j_running_transaction != NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);

	commit_transaction = journal->j_running_transaction;
	J_ASSERT(commit_transaction->t_state == T_RUNNING);

	jbd_debug(1, "JBD: starting commit of transaction %d\n",
			commit_transaction->t_tid);

	spin_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_LOCKED;

	/*
	 * This is an open-coded wait_event(): prepare_to_wait() must run
	 * before t_updates is re-tested, so that a wakeup arriving between
	 * the test and schedule() is not lost.
	 */
	spin_lock(&commit_transaction->t_handle_lock);
	while (commit_transaction->t_updates) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_updates, &wait,
					TASK_UNINTERRUPTIBLE);
		if (commit_transaction->t_updates) {
			spin_unlock(&commit_transaction->t_handle_lock);
			spin_unlock(&journal->j_state_lock);
			schedule();
			spin_lock(&journal->j_state_lock);
			spin_lock(&commit_transaction->t_handle_lock);
		}
		finish_wait(&journal->j_wait_updates, &wait);
	}
	spin_unlock(&commit_transaction->t_handle_lock);

	J_ASSERT(commit_transaction->t_outstanding_credits <=
			journal->j_max_transaction_buffers);
	/*
	 * First thing we are allowed to do is to discard any remaining
	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
	 * that there are no such buffers: if a large filesystem
	 * operation like a truncate needs to split itself over multiple
	 * transactions, then it may try to do a jbd2_journal_restart() while
	 * there are still BJ_Reserved buffers outstanding.  These must
	 * be released cleanly from the current transaction.
	 *
	 * In this case, the filesystem must still reserve write access
	 * again before modifying the buffer in the new transaction, but
	 * we do not require it to remember exactly which old buffers it
	 * has reserved.  This is consistent with the existing behaviour
	 * that multiple jbd2_journal_get_write_access() calls to the same
	 * buffer are perfectly permissible.
	 */
	while (commit_transaction->t_reserved_list) {
		jh = commit_transaction->t_reserved_list;
		JBUFFER_TRACE(jh, "reserved, unused: refile");
		/*
		 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer()
		 * may leave undo-committed data.
		 */
		if (jh->b_committed_data) {
			struct buffer_head *bh = jh2bh(jh);

			jbd_lock_bh_state(bh);
			jbd2_slab_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			jbd_unlock_bh_state(bh);
		}
		jbd2_journal_refile_buffer(journal, jh);
	}
	/*
	 * Now try to drop any written-back buffers from the journal's
	 * checkpoint lists.  We do this *before* commit because it potentially
	 * frees some memory
	 */
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_clean_checkpoint_list(journal);
	spin_unlock(&journal->j_list_lock);

	jbd_debug(3, "JBD: commit phase 1\n");

	/*
	 * Switch to a new revoke table.
	 */
	jbd2_journal_switch_revoke_table(journal);

	commit_transaction->t_state = T_FLUSH;
	journal->j_committing_transaction = commit_transaction;
	journal->j_running_transaction = NULL;
	commit_transaction->t_log_start = journal->j_head;
	wake_up(&journal->j_wait_transaction_locked);
	spin_unlock(&journal->j_state_lock);

	jbd_debug(3, "JBD: commit phase 2\n");
	/*
	 * First, drop modified flag: all accesses to the buffers
	 * will be tracked for a new transaction only -bzzz
	 */
	spin_lock(&journal->j_list_lock);
	if (commit_transaction->t_buffers) {
		new_jh = jh = commit_transaction->t_buffers->b_tnext;
		do {
			J_ASSERT_JH(new_jh, new_jh->b_modified == 1 ||
					new_jh->b_modified == 0);
			new_jh->b_modified = 0;
			new_jh = new_jh->b_tnext;
		} while (new_jh != jh);
	}
	spin_unlock(&journal->j_list_lock);
	/*
	 * Now start flushing things to disk, in the order they appear
	 * on the transaction lists.  Data blocks go first.
	 */
	err = 0;
	journal_submit_data_buffers(journal, commit_transaction);

	/*
	 * Wait for all previously submitted IO to complete.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_locked_list) {
		struct buffer_head *bh;

		jh = commit_transaction->t_locked_list->b_tprev;
		bh = jh2bh(jh);
		get_bh(bh);
		if (buffer_locked(bh)) {
			spin_unlock(&journal->j_list_lock);
			wait_on_buffer(bh);
			if (unlikely(!buffer_uptodate(bh)))
				err = -EIO;
			spin_lock(&journal->j_list_lock);
		}
		if (!inverted_lock(journal, bh)) {
			put_bh(bh);
			spin_lock(&journal->j_list_lock);
			continue;
		}
		if (buffer_jbd(bh) && jh->b_jlist == BJ_Locked) {
			__jbd2_journal_unfile_buffer(jh);
			jbd_unlock_bh_state(bh);
			jbd2_journal_remove_journal_head(bh);
			put_bh(bh);
		} else {
			jbd_unlock_bh_state(bh);
		}
		put_bh(bh);
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);

	if (err)
		__jbd2_journal_abort_hard(journal);
	jbd2_journal_write_revoke_records(journal, commit_transaction);

	jbd_debug(3, "JBD: commit phase 2\n");

	/*
	 * If we found any dirty or locked buffers, then we should have
	 * looped back up to the write_out_data label.  If there weren't
	 * any then journal_clean_data_list should have wiped the list
	 * clean by now, so check that it is in fact empty.
	 */
	J_ASSERT(commit_transaction->t_sync_datalist == NULL);

	jbd_debug(3, "JBD: commit phase 3\n");
	/*
	 * Way to go: we have now written out all of the data for a
	 * transaction!  Now comes the tricky part: we need to write out
	 * metadata.  Loop over the transaction's entire buffer list:
	 */
	commit_transaction->t_state = T_COMMIT;

	descriptor = NULL;
	bufs = 0;
	while (commit_transaction->t_buffers) {

		/* Find the next buffer to be journaled... */

		jh = commit_transaction->t_buffers;

		/* If we're in abort mode, we just un-journal the buffer and
		   release it for background writing. */

		if (is_journal_aborted(journal)) {
			JBUFFER_TRACE(jh, "journal is aborting: refile");
			jbd2_journal_refile_buffer(journal, jh);
			/* If that was the last one, we need to clean up
			 * any descriptor buffers which may have been
			 * already allocated, even if we are now
			 * aborting. */
			if (!commit_transaction->t_buffers)
				goto start_journal_io;
			continue;
		}

		/* Make sure we have a descriptor block in which to
		   record the metadata buffer. */

		if (!descriptor) {
			struct buffer_head *bh;

			J_ASSERT(bufs == 0);

			jbd_debug(4, "JBD: get descriptor\n");

			descriptor = jbd2_journal_get_descriptor_buffer(journal);
			if (!descriptor) {
				__jbd2_journal_abort_hard(journal);
				continue;
			}

			bh = jh2bh(descriptor);
			jbd_debug(4, "JBD: got buffer %llu (%p)\n",
				(unsigned long long)bh->b_blocknr, bh->b_data);
			header = (journal_header_t *)&bh->b_data[0];
			header->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
			header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK);
			header->h_sequence = cpu_to_be32(commit_transaction->t_tid);

			tagp = &bh->b_data[sizeof(journal_header_t)];
			space_left = bh->b_size - sizeof(journal_header_t);
			first_tag = 1;
			set_buffer_jwrite(bh);
			set_buffer_dirty(bh);
			wbuf[bufs++] = bh;

			/* Record it so that we can wait for IO
			   completion later */
			BUFFER_TRACE(bh, "ph3: file as descriptor");
			jbd2_journal_file_buffer(descriptor, commit_transaction,
					BJ_LogCtl);
		}

		/* Where is the buffer to be written? */

		err = jbd2_journal_next_log_block(journal, &blocknr);
		/* If the block mapping failed, just abandon the buffer
		   and repeat this loop: we'll fall into the
		   refile-on-abort condition above. */
		if (err) {
			__jbd2_journal_abort_hard(journal);
			continue;
		}

		/*
		 * start_this_handle() uses t_outstanding_credits to determine
		 * the free space in the log, but this counter is changed
		 * by jbd2_journal_next_log_block() also.
		 */
		commit_transaction->t_outstanding_credits--;

		/* Bump b_count to prevent truncate from stumbling over
		   the shadowed buffer!  @@@ This can go if we ever get
		   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
		atomic_inc(&jh2bh(jh)->b_count);

		/* Make a temporary IO buffer with which to write it out
		   (this will requeue both the metadata buffer and the
		   temporary IO buffer).  new_bh goes on BJ_IO */

		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
		/*
		 * akpm: jbd2_journal_write_metadata_buffer() sets
		 * new_bh->b_transaction to commit_transaction.
		 * We need to clean this up before we release new_bh
		 * (which is of type BJ_IO)
		 */
		JBUFFER_TRACE(jh, "ph3: write metadata");
		flags = jbd2_journal_write_metadata_buffer(commit_transaction,
						jh, &new_jh, blocknr);
		set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
		wbuf[bufs++] = jh2bh(new_jh);

		/* Record the new block's tag in the current descriptor
		   buffer */

		tag_flag = 0;
		if (flags & 1)
			tag_flag |= JBD2_FLAG_ESCAPE;
		if (!first_tag)
			tag_flag |= JBD2_FLAG_SAME_UUID;

		tag = (journal_block_tag_t *)tagp;
		write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
		tag->t_flags = cpu_to_be32(tag_flag);
		tagp += tag_bytes;
		space_left -= tag_bytes;

		if (first_tag) {
			memcpy(tagp, journal->j_uuid, 16);
			tagp += 16;
			space_left -= 16;
			first_tag = 0;
		}

		/* If there's no more to do, or if the descriptor is full,
		   let the IO rip! */

		if (bufs == journal->j_wbufsize ||
		    commit_transaction->t_buffers == NULL ||
		    space_left < tag_bytes + 16) {

			jbd_debug(4, "JBD: Submit %d IOs\n", bufs);

			/* Write an end-of-descriptor marker before
			   submitting the IOs.  "tag" still points to
			   the last tag we set up. */

			tag->t_flags |= cpu_to_be32(JBD2_FLAG_LAST_TAG);

start_journal_io:
			for (i = 0; i < bufs; i++) {
				struct buffer_head *bh = wbuf[i];

				lock_buffer(bh);
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				bh->b_end_io = journal_end_buffer_io_sync;
				submit_bh(WRITE, bh);
			}
			cond_resched();

			/* Force a new descriptor to be generated next
			   time round the loop. */
			descriptor = NULL;
			bufs = 0;
		}
	}
	/* Lo and behold: we have just managed to send a transaction to
	   the log.  Before we can commit it, wait for the IO so far to
	   complete.  Control buffers being written are on the
	   transaction's t_log_list queue, and metadata buffers are on
	   the t_iobuf_list queue.

	   Wait for the buffers in reverse order.  That way we are
	   less likely to be woken up until all IOs have completed, and
	   so we incur less scheduling load.
	 */

	jbd_debug(3, "JBD: commit phase 4\n");

	/*
	 * akpm: these are BJ_IO, and j_list_lock is not needed.
	 * See __journal_try_to_free_buffer.
	 */
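
	/*
	 * A summary of the BJ_IO/BJ_Shadow pairing unwound below: for each
	 * journaled metadata buffer, jbd2_journal_write_metadata_buffer()
	 * created a temporary buffer_head (filed on BJ_IO) whose contents
	 * were written to the log, while the original buffer was parked on
	 * BJ_Shadow.  The two lists were filed in the same order, so walking
	 * both in reverse pairs each temporary buffer with its shadowed
	 * partner: the temporary bh is freed, and the original moves to
	 * BJ_Forget for checkpointing once the commit record is written.
	 */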
wait_for_iobuf:
	while (commit_transaction->t_iobuf_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_iobuf_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_iobuf;
		}
		if (cond_resched())
			goto wait_for_iobuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		clear_buffer_jwrite(bh);

		JBUFFER_TRACE(jh, "ph4: unfile after journal write");
		jbd2_journal_unfile_buffer(journal, jh);

		/*
		 * ->t_iobuf_list should contain only dummy buffer_heads
		 * which were created by jbd2_journal_write_metadata_buffer().
		 */
		BUFFER_TRACE(bh, "dumping temporary bh");
		jbd2_journal_put_journal_head(jh);
		__brelse(bh);
		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
		free_buffer_head(bh);

		/* We also have to unlock and free the corresponding
		   shadowed buffer */
		jh = commit_transaction->t_shadow_list->b_tprev;
		bh = jh2bh(jh);
		clear_bit(BH_JWrite, &bh->b_state);
		J_ASSERT_BH(bh, buffer_jbddirty(bh));

		/* The metadata is now released for reuse, but we need
		   to remember it against this transaction so that when
		   we finally commit, we can do any checkpointing
		   required. */
		JBUFFER_TRACE(jh, "file as BJ_Forget");
		jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
		/* Wake up any transactions which were waiting for this
		   IO to complete */
		wake_up_bit(&bh->b_state, BH_Unshadow);
		JBUFFER_TRACE(jh, "brelse shadowed buffer");
		__brelse(bh);
	}

	J_ASSERT(commit_transaction->t_shadow_list == NULL);
	jbd_debug(3, "JBD: commit phase 5\n");

	/* Here we wait for the revoke record and descriptor record buffers */
wait_for_ctlbuf:
	while (commit_transaction->t_log_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_log_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_ctlbuf;
		}
		if (cond_resched())
			goto wait_for_ctlbuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
		clear_buffer_jwrite(bh);
		jbd2_journal_unfile_buffer(journal, jh);
		jbd2_journal_put_journal_head(jh);
		__brelse(bh);		/* One for getblk */
		/* AKPM: bforget here */
	}
	jbd_debug(3, "JBD: commit phase 6\n");

	if (journal_write_commit_record(journal, commit_transaction))
		err = -EIO;

	if (err)
		__jbd2_journal_abort_hard(journal);

	/* End of a transaction!  Finally, we can do checkpoint
	   processing: any buffers committed as a result of this
	   transaction can be removed from any checkpoint list it was on
	   before. */

	jbd_debug(3, "JBD: commit phase 7\n");

	J_ASSERT(commit_transaction->t_sync_datalist == NULL);
	J_ASSERT(commit_transaction->t_buffers == NULL);
	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
	J_ASSERT(commit_transaction->t_iobuf_list == NULL);
	J_ASSERT(commit_transaction->t_shadow_list == NULL);
	J_ASSERT(commit_transaction->t_log_list == NULL);
restart_loop:
	/*
	 * As there are other places (journal_unmap_buffer()) adding buffers
	 * to this list we have to be careful and hold the j_list_lock.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_forget) {
		transaction_t *cp_transaction;
		struct buffer_head *bh;

		jh = commit_transaction->t_forget;
		spin_unlock(&journal->j_list_lock);
		bh = jh2bh(jh);
		jbd_lock_bh_state(bh);
		J_ASSERT_JH(jh, jh->b_transaction == commit_transaction ||
			jh->b_transaction == journal->j_running_transaction);

		/*
		 * If there is undo-protected committed data against
		 * this buffer, then we can remove it now.  If it is a
		 * buffer needing such protection, the old frozen_data
		 * field now points to a committed version of the
		 * buffer, so rotate that field to the new committed
		 * data.
		 *
		 * Otherwise, we can just throw away the frozen data now.
		 */
		if (jh->b_committed_data) {
			jbd2_slab_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			if (jh->b_frozen_data) {
				jh->b_committed_data = jh->b_frozen_data;
				jh->b_frozen_data = NULL;
			}
		} else if (jh->b_frozen_data) {
			jbd2_slab_free(jh->b_frozen_data, bh->b_size);
			jh->b_frozen_data = NULL;
		}

		spin_lock(&journal->j_list_lock);
		cp_transaction = jh->b_cp_transaction;
		if (cp_transaction) {
			JBUFFER_TRACE(jh, "remove from old cp transaction");
			__jbd2_journal_remove_checkpoint(jh);
		}

		/* Only re-checkpoint the buffer_head if it is marked
		 * dirty.  If the buffer was added to the BJ_Forget list
		 * by jbd2_journal_forget, it may no longer be dirty and
		 * there's no point in keeping a checkpoint record for
		 * it. */

		/* A buffer which has been freed while still being
		 * journaled by a previous transaction may end up still
		 * being dirty here, but we want to avoid writing back
		 * that buffer in the future now that the last use has
		 * been committed.  That's not only a performance gain,
		 * it also stops aliasing problems if the buffer is left
		 * behind for writeback and gets reallocated for another
		 * use in a different page. */
		if (buffer_freed(bh)) {
			clear_buffer_freed(bh);
			clear_buffer_jbddirty(bh);
		}

		if (buffer_jbddirty(bh)) {
			JBUFFER_TRACE(jh, "add to new checkpointing trans");
			__jbd2_journal_insert_checkpoint(jh, commit_transaction);
			JBUFFER_TRACE(jh, "refile for checkpoint writeback");
			__jbd2_journal_refile_buffer(jh);
			jbd_unlock_bh_state(bh);
		} else {
			J_ASSERT_BH(bh, !buffer_dirty(bh));
			/* The buffer on BJ_Forget list and not jbddirty means
			 * it has been freed by this transaction and hence it
			 * could not have been reallocated until this
			 * transaction has committed.  *BUT* it could be
			 * reallocated once we have written all the data to
			 * disk and before we process the buffer on BJ_Forget
			 * list. */
			JBUFFER_TRACE(jh, "refile or unfile freed buffer");
			__jbd2_journal_refile_buffer(jh);
			if (!jh->b_transaction) {
				jbd_unlock_bh_state(bh);
				/* needs a brelse */
				jbd2_journal_remove_journal_head(bh);
				release_buffer_page(bh);
			} else
				jbd_unlock_bh_state(bh);
		}
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);
	/*
	 * This is a bit sleazy.  We borrow j_list_lock to protect
	 * journal->j_committing_transaction in __jbd2_journal_remove_checkpoint.
	 * Really, __jbd2_journal_remove_checkpoint should be using j_state_lock,
	 * but it's a bit of a hassle to hold that across
	 * __jbd2_journal_remove_checkpoint.
	 */
	spin_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	/*
	 * Now recheck if some buffers did not get attached to the transaction
	 * while the lock was dropped...
	 */
	if (commit_transaction->t_forget) {
		spin_unlock(&journal->j_list_lock);
		spin_unlock(&journal->j_state_lock);
		goto restart_loop;
	}
	/* Done with this transaction! */

	jbd_debug(3, "JBD: commit phase 8\n");

	J_ASSERT(commit_transaction->t_state == T_COMMIT);

	commit_transaction->t_state = T_FINISHED;
	J_ASSERT(commit_transaction == journal->j_committing_transaction);
	journal->j_commit_sequence = commit_transaction->t_tid;
	journal->j_committing_transaction = NULL;
	spin_unlock(&journal->j_state_lock);

	if (commit_transaction->t_checkpoint_list == NULL) {
		__jbd2_journal_drop_transaction(journal, commit_transaction);
	} else {
		if (journal->j_checkpoint_transactions == NULL) {
			journal->j_checkpoint_transactions = commit_transaction;
			commit_transaction->t_cpnext = commit_transaction;
			commit_transaction->t_cpprev = commit_transaction;
		} else {
			commit_transaction->t_cpnext =
				journal->j_checkpoint_transactions;
			commit_transaction->t_cpprev =
				commit_transaction->t_cpnext->t_cpprev;
			commit_transaction->t_cpnext->t_cpprev =
				commit_transaction;
			commit_transaction->t_cpprev->t_cpnext =
				commit_transaction;
		}
	}
	spin_unlock(&journal->j_list_lock);

	jbd_debug(1, "JBD: commit %d complete, head %d\n",
			journal->j_commit_sequence, journal->j_tail_sequence);

	wake_up(&journal->j_wait_done_commit);
}