@@ -287,6 +287,7 @@ int journal_write_metadata_buffer(transaction_t *transaction,
 	struct page *new_page;
 	unsigned int new_offset;
 	struct buffer_head *bh_in = jh2bh(jh_in);
+	journal_t *journal = transaction->t_journal;
 
 	/*
 	 * The buffer really shouldn't be locked: only the current committing
@@ -300,6 +301,11 @@ int journal_write_metadata_buffer(transaction_t *transaction,
 	J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in));
 
 	new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL);
+	/* keep subsequent assertions sane */
+	new_bh->b_state = 0;
+	init_buffer(new_bh, NULL, NULL);
+	atomic_set(&new_bh->b_count, 1);
+	new_jh = journal_add_journal_head(new_bh);	/* This sleeps */
 
 	/*
 	 * If a new transaction has already done a buffer copy-out, then
@@ -361,14 +367,6 @@ repeat:
 		kunmap_atomic(mapped_data, KM_USER0);
 	}
 
-	/* keep subsequent assertions sane */
-	new_bh->b_state = 0;
-	init_buffer(new_bh, NULL, NULL);
-	atomic_set(&new_bh->b_count, 1);
-	jbd_unlock_bh_state(bh_in);
-
-	new_jh = journal_add_journal_head(new_bh);	/* This sleeps */
-
 	set_bh_page(new_bh, new_page, new_offset);
 	new_jh->b_transaction = NULL;
 	new_bh->b_size = jh2bh(jh_in)->b_size;
@@ -385,7 +383,11 @@ repeat:
 	 * copying is moved to the transaction's shadow queue.
 	 */
 	JBUFFER_TRACE(jh_in, "file as BJ_Shadow");
-	journal_file_buffer(jh_in, transaction, BJ_Shadow);
+	spin_lock(&journal->j_list_lock);
+	__journal_file_buffer(jh_in, transaction, BJ_Shadow);
+	spin_unlock(&journal->j_list_lock);
+	jbd_unlock_bh_state(bh_in);
+
 	JBUFFER_TRACE(new_jh, "file as BJ_IO");
 	journal_file_buffer(new_jh, transaction, BJ_IO);
 
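The functional change is in the last hunk: jh_in is now filed on the BJ_Shadow list via __journal_file_buffer() under journal->j_list_lock while the bh_state lock is still held, and jbd_unlock_bh_state(bh_in) moves to after the list insertion. Before the patch, the bh_state lock was dropped first and journal_file_buffer() took j_list_lock internally, leaving a window in which another thread could see the buffer unlocked but not yet on the shadow list. The earlier hunks make this ordering possible: journal_add_journal_head() can sleep, so the setup of new_bh and new_jh is hoisted to before the bh_state lock is taken (the lock acquisition itself is outside the context shown).

As a rough userspace sketch of the resulting locking pattern, with pthread mutexes standing in for the bh_state and j_list locks (every name below is invented for illustration; none of it is kernel API):

/* sketch.c - userspace analogue of the locking pattern above.
 * Build: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

enum jlist { BJ_NONE, BJ_SHADOW, BJ_IO };

struct buffer {
	pthread_mutex_t state_lock;	/* stands in for the bh_state lock */
	enum jlist jlist;		/* which journal list we are on */
};

static pthread_mutex_t j_list_lock = PTHREAD_MUTEX_INITIALIZER;

/* analogue of __journal_file_buffer(): caller must hold BOTH locks */
static void file_buffer_locked(struct buffer *b, enum jlist list)
{
	b->jlist = list;
}

/*
 * The pattern the patch adopts: the buffer is moved to the shadow
 * list while its state lock is still held, so no other thread can
 * observe the buffer with its state lock free but not yet filed.
 */
static void file_as_shadow(struct buffer *b)
{
	/* state_lock is held on entry, like bh_state in the code above */
	pthread_mutex_lock(&j_list_lock);
	file_buffer_locked(b, BJ_SHADOW);
	pthread_mutex_unlock(&j_list_lock);
	pthread_mutex_unlock(&b->state_lock);	/* dropped only now */
}

int main(void)
{
	struct buffer b = { PTHREAD_MUTEX_INITIALIZER, BJ_NONE };

	pthread_mutex_lock(&b.state_lock);
	file_as_shadow(&b);
	printf("buffer filed on list %d\n", b.jlist);
	return 0;
}

Note the nesting matches the hunk: j_list_lock is acquired and released inside the region covered by the per-buffer state lock, and the state lock is released only once the buffer is publicly on its new list.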