Btrfs: use fastpath in extent state ops as much as possible

Fully utilize our extent state's new helper functions to use
fastpath as much as possible.

Signed-off-by: Liu Bo <liubo2009@cn.fujitsu.com>
Reviewed-by: Josef Bacik <josef@redhat.com>
This commit is contained in:
Liu Bo 2012-05-10 18:10:39 +08:00 committed by Josef Bacik
parent f8c5d0b443
commit d1ac6e41d5

View File

@@ -569,10 +569,8 @@ hit_next:
 		if (err)
 			goto out;
 		if (state->end <= end) {
-			clear_state_bit(tree, state, &bits, wake);
-			if (last_end == (u64)-1)
-				goto out;
-			start = last_end + 1;
+			state = clear_state_bit(tree, state, &bits, wake);
+			goto next;
 		}
 		goto search_again;
 	}
@@ -780,7 +778,6 @@ hit_next:
 	 * Just lock what we found and keep going
 	 */
 	if (state->start == start && state->end <= end) {
-		struct rb_node *next_node;
 		if (state->state & exclusive_bits) {
 			*failed_start = state->start;
 			err = -EEXIST;
@@ -788,20 +785,15 @@ hit_next:
 		}
 		set_state_bits(tree, state, &bits);
 		cache_state(state, cached_state);
 		merge_state(tree, state);
 		if (last_end == (u64)-1)
 			goto out;
 		start = last_end + 1;
-		next_node = rb_next(&state->rb_node);
-		if (next_node && start < end && prealloc && !need_resched()) {
-			state = rb_entry(next_node, struct extent_state,
-					 rb_node);
-			if (state->start == start)
-				goto hit_next;
-		}
+		state = next_state(state);
+		if (start < end && state && state->start == start &&
+		    !need_resched())
+			goto hit_next;
 		goto search_again;
 	}
@@ -844,6 +836,10 @@ hit_next:
 			if (last_end == (u64)-1)
 				goto out;
 			start = last_end + 1;
+			state = next_state(state);
+			if (start < end && state && state->start == start &&
+			    !need_resched())
+				goto hit_next;
 		}
 		goto search_again;
 	}
@@ -993,21 +989,14 @@ hit_next:
 	 * Just lock what we found and keep going
 	 */
 	if (state->start == start && state->end <= end) {
-		struct rb_node *next_node;
 		set_state_bits(tree, state, &bits);
-		clear_state_bit(tree, state, &clear_bits, 0);
+		state = clear_state_bit(tree, state, &clear_bits, 0);
 		if (last_end == (u64)-1)
 			goto out;
 		start = last_end + 1;
-		next_node = rb_next(&state->rb_node);
-		if (next_node && start < end && prealloc && !need_resched()) {
-			state = rb_entry(next_node, struct extent_state,
-					 rb_node);
-			if (state->start == start)
-				goto hit_next;
-		}
+		if (start < end && state && state->start == start &&
+		    !need_resched())
+			goto hit_next;
 		goto search_again;
 	}
@@ -1041,10 +1030,13 @@ hit_next:
 			goto out;
 		if (state->end <= end) {
 			set_state_bits(tree, state, &bits);
-			clear_state_bit(tree, state, &clear_bits, 0);
+			state = clear_state_bit(tree, state, &clear_bits, 0);
 			if (last_end == (u64)-1)
 				goto out;
 			start = last_end + 1;
+			if (start < end && state && state->start == start &&
+			    !need_resched())
+				goto hit_next;
 		}
 		goto search_again;
 	}