forked from Minki/linux
writeback: improve scalability of bdi writeback work queues
If you're going to do an atomic RMW on each list entry, there's not much point in all the RCU complexities of the list walking. This is only going to help the multi-thread case I guess, but it doesn't hurt to do now. Signed-off-by: Nick Piggin <npiggin@suse.de> Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
This commit is contained in:
parent
deed62edff
commit
77fad5e625
@@ -772,8 +772,9 @@ static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
 	rcu_read_lock();

 	list_for_each_entry_rcu(work, &bdi->work_list, list) {
-		if (!test_and_clear_bit(wb->nr, &work->seen))
+		if (!test_bit(wb->nr, &work->seen))
 			continue;
+		clear_bit(wb->nr, &work->seen);

 		ret = work;
 		break;
Loading…
Reference in New Issue
Block a user