forked from Minki/linux
[PATCH] Fix JBD race in t_forget list handling
Fix race between journal_commit_transaction() and other places, such as journal_unmap_buffer(), that are adding buffers to a transaction's t_forget list. We have to protect against such places by holding j_list_lock even when traversing the t_forget list. The fact that other places can only add buffers to the list makes the locking easier. OTOH, the lock ranking complicates things... Signed-off-by: Jan Kara <jack@suse.cz> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent
cbf0d27a13
commit
e6c9f5c188
@ -720,11 +720,17 @@ wait_for_iobuf:
|
|||||||
J_ASSERT(commit_transaction->t_log_list == NULL);
|
J_ASSERT(commit_transaction->t_log_list == NULL);
|
||||||
|
|
||||||
restart_loop:
|
restart_loop:
|
||||||
|
/*
|
||||||
|
* As there are other places (journal_unmap_buffer()) adding buffers
|
||||||
|
* to this list we have to be careful and hold the j_list_lock.
|
||||||
|
*/
|
||||||
|
spin_lock(&journal->j_list_lock);
|
||||||
while (commit_transaction->t_forget) {
|
while (commit_transaction->t_forget) {
|
||||||
transaction_t *cp_transaction;
|
transaction_t *cp_transaction;
|
||||||
struct buffer_head *bh;
|
struct buffer_head *bh;
|
||||||
|
|
||||||
jh = commit_transaction->t_forget;
|
jh = commit_transaction->t_forget;
|
||||||
|
spin_unlock(&journal->j_list_lock);
|
||||||
bh = jh2bh(jh);
|
bh = jh2bh(jh);
|
||||||
jbd_lock_bh_state(bh);
|
jbd_lock_bh_state(bh);
|
||||||
J_ASSERT_JH(jh, jh->b_transaction == commit_transaction ||
|
J_ASSERT_JH(jh, jh->b_transaction == commit_transaction ||
|
||||||
@ -792,8 +798,24 @@ restart_loop:
|
|||||||
journal_remove_journal_head(bh); /* needs a brelse */
|
journal_remove_journal_head(bh); /* needs a brelse */
|
||||||
release_buffer_page(bh);
|
release_buffer_page(bh);
|
||||||
}
|
}
|
||||||
|
cond_resched_lock(&journal->j_list_lock);
|
||||||
|
}
|
||||||
spin_unlock(&journal->j_list_lock);
|
spin_unlock(&journal->j_list_lock);
|
||||||
if (cond_resched())
|
/*
|
||||||
|
* This is a bit sleazy. We borrow j_list_lock to protect
|
||||||
|
* journal->j_committing_transaction in __journal_remove_checkpoint.
|
||||||
|
* Really, __journal_remove_checkpoint should be using j_state_lock but
|
||||||
|
* it's a bit hassle to hold that across __journal_remove_checkpoint
|
||||||
|
*/
|
||||||
|
spin_lock(&journal->j_state_lock);
|
||||||
|
spin_lock(&journal->j_list_lock);
|
||||||
|
/*
|
||||||
|
* Now recheck if some buffers did not get attached to the transaction
|
||||||
|
* while the lock was dropped...
|
||||||
|
*/
|
||||||
|
if (commit_transaction->t_forget) {
|
||||||
|
spin_unlock(&journal->j_list_lock);
|
||||||
|
spin_unlock(&journal->j_state_lock);
|
||||||
goto restart_loop;
|
goto restart_loop;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -803,14 +825,6 @@ restart_loop:
|
|||||||
|
|
||||||
J_ASSERT(commit_transaction->t_state == T_COMMIT);
|
J_ASSERT(commit_transaction->t_state == T_COMMIT);
|
||||||
|
|
||||||
/*
|
|
||||||
* This is a bit sleazy. We borrow j_list_lock to protect
|
|
||||||
* journal->j_committing_transaction in __journal_remove_checkpoint.
|
|
||||||
* Really, __journal_remove_checkpoint should be using j_state_lock but
|
|
||||||
* it's a bit hassle to hold that across __journal_remove_checkpoint
|
|
||||||
*/
|
|
||||||
spin_lock(&journal->j_state_lock);
|
|
||||||
spin_lock(&journal->j_list_lock);
|
|
||||||
commit_transaction->t_state = T_FINISHED;
|
commit_transaction->t_state = T_FINISHED;
|
||||||
J_ASSERT(commit_transaction == journal->j_committing_transaction);
|
J_ASSERT(commit_transaction == journal->j_committing_transaction);
|
||||||
journal->j_commit_sequence = commit_transaction->t_tid;
|
journal->j_commit_sequence = commit_transaction->t_tid;
|
||||||
|
Loading…
Reference in New Issue
Block a user