btrfs: cleanup extent_op handling

The cleanup_extent_op function actually would run the extent_op if it
needed running, which made the name sort of a misnomer.  Change it to
run_and_cleanup_extent_op, and move the actual cleanup work to
cleanup_extent_op so it can be used by check_ref_cleanup() in order to
unify the extent op handling.

Reviewed-by: Lu Fengqi <lufq.fnst@cn.fujitsu.com>
Signed-off-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
Josef Bacik 2018-12-03 10:20:31 -05:00 committed by David Sterba
parent 07c47775f4
commit bedc661760

View File

@@ -2424,19 +2424,32 @@ static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
 	btrfs_delayed_ref_unlock(head);
 }
static int cleanup_extent_op(struct btrfs_trans_handle *trans, static struct btrfs_delayed_extent_op *cleanup_extent_op(
struct btrfs_delayed_ref_head *head) struct btrfs_delayed_ref_head *head)
{ {
struct btrfs_delayed_extent_op *extent_op = head->extent_op; struct btrfs_delayed_extent_op *extent_op = head->extent_op;
if (!extent_op)
return NULL;
if (head->must_insert_reserved) {
head->extent_op = NULL;
btrfs_free_delayed_extent_op(extent_op);
return NULL;
}
return extent_op;
}
+static int run_and_cleanup_extent_op(struct btrfs_trans_handle *trans,
+				     struct btrfs_delayed_ref_head *head)
+{
+	struct btrfs_delayed_extent_op *extent_op;
 	int ret;
 
+	extent_op = cleanup_extent_op(head);
 	if (!extent_op)
 		return 0;
 	head->extent_op = NULL;
-	if (head->must_insert_reserved) {
-		btrfs_free_delayed_extent_op(extent_op);
-		return 0;
-	}
 	spin_unlock(&head->lock);
 	ret = run_delayed_extent_op(trans, head, extent_op);
 	btrfs_free_delayed_extent_op(extent_op);
@@ -2488,7 +2501,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
 	delayed_refs = &trans->transaction->delayed_refs;
 
-	ret = cleanup_extent_op(trans, head);
+	ret = run_and_cleanup_extent_op(trans, head);
 	if (ret < 0) {
 		unselect_delayed_ref_head(delayed_refs, head);
 		btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
@@ -6980,12 +6993,8 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
 	if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root))
 		goto out;
 
-	if (head->extent_op) {
-		if (!head->must_insert_reserved)
-			goto out;
-		btrfs_free_delayed_extent_op(head->extent_op);
-		head->extent_op = NULL;
-	}
+	if (cleanup_extent_op(head) != NULL)
+		goto out;
 
 	/*
 	 * waiting for the lock here would deadlock. If someone else has it