mirror of
https://github.com/torvalds/linux.git
synced 2024-11-23 20:51:44 +00:00
58ab5e32e6
Ming reports that lockdep spews the following trace. What this essentially says is that the sbitmap swap_lock was used inconsistently in IRQ enabled and disabled context, and that is usually indicative of a bug that will cause a deadlock. For this case, it's a false positive. The swap_lock is used from process context only, when we swap the bits in the word and cleared mask. We also end up doing that when we are getting a driver tag, from the blk_mq_mark_tag_wait(), and from there we hold the waitqueue lock with IRQs disabled. However, this isn't from an actual IRQ, it's still process context. In lieu of a better way to fix this, simply always disable interrupts when grabbing the swap_lock if lockdep is enabled. [ 100.967642] ================start test sanity/001================ [ 101.238280] null: module loaded [ 106.093735] [ 106.094012] ===================================================== [ 106.094854] WARNING: SOFTIRQ-safe -> SOFTIRQ-unsafe lock order detected [ 106.095759] 4.20.0-rc3_5d2ee7122c73_for-next+ #1 Not tainted [ 106.096551] ----------------------------------------------------- [ 106.097386] fio/1043 [HC0[0]:SC0[0]:HE0:SE1] is trying to acquire: [ 106.098231] 000000004c43fa71 (&(&sb->map[i].swap_lock)->rlock){+.+.}, at: sbitmap_get+0xd5/0x22c [ 106.099431] [ 106.099431] and this task is already holding: [ 106.100229] 000000007eec8b2f (&(&hctx->dispatch_wait_lock)->rlock){....}, at: blk_mq_dispatch_rq_list+0x4c1/0xd7c [ 106.101630] which would create a new lock dependency: [ 106.102326] (&(&hctx->dispatch_wait_lock)->rlock){....} -> (&(&sb->map[i].swap_lock)->rlock){+.+.} [ 106.103553] [ 106.103553] but this new dependency connects a SOFTIRQ-irq-safe lock: [ 106.104580] (&sbq->ws[i].wait){..-.} [ 106.104582] [ 106.104582] ... 
which became SOFTIRQ-irq-safe at: [ 106.105751] _raw_spin_lock_irqsave+0x4b/0x82 [ 106.106284] __wake_up_common_lock+0x119/0x1b9 [ 106.106825] sbitmap_queue_wake_up+0x33f/0x383 [ 106.107456] sbitmap_queue_clear+0x4c/0x9a [ 106.108046] __blk_mq_free_request+0x188/0x1d3 [ 106.108581] blk_mq_free_request+0x23b/0x26b [ 106.109102] scsi_end_request+0x345/0x5d7 [ 106.109587] scsi_io_completion+0x4b5/0x8f0 [ 106.110099] scsi_finish_command+0x412/0x456 [ 106.110615] scsi_softirq_done+0x23f/0x29b [ 106.111115] blk_done_softirq+0x2a7/0x2e6 [ 106.111608] __do_softirq+0x360/0x6ad [ 106.112062] run_ksoftirqd+0x2f/0x5b [ 106.112499] smpboot_thread_fn+0x3a5/0x3db [ 106.113000] kthread+0x1d4/0x1e4 [ 106.113457] ret_from_fork+0x3a/0x50 [ 106.113969] [ 106.113969] to a SOFTIRQ-irq-unsafe lock: [ 106.114672] (&(&sb->map[i].swap_lock)->rlock){+.+.} [ 106.114674] [ 106.114674] ... which became SOFTIRQ-irq-unsafe at: [ 106.116000] ... [ 106.116003] _raw_spin_lock+0x33/0x64 [ 106.116676] sbitmap_get+0xd5/0x22c [ 106.117134] __sbitmap_queue_get+0xe8/0x177 [ 106.117731] __blk_mq_get_tag+0x1e6/0x22d [ 106.118286] blk_mq_get_tag+0x1db/0x6e4 [ 106.118756] blk_mq_get_driver_tag+0x161/0x258 [ 106.119383] blk_mq_dispatch_rq_list+0x28e/0xd7c [ 106.120043] blk_mq_do_dispatch_sched+0x23a/0x287 [ 106.120607] blk_mq_sched_dispatch_requests+0x379/0x3fc [ 106.121234] __blk_mq_run_hw_queue+0x137/0x17e [ 106.121781] __blk_mq_delay_run_hw_queue+0x80/0x25f [ 106.122366] blk_mq_run_hw_queue+0x151/0x187 [ 106.122887] blk_mq_sched_insert_requests+0x13f/0x175 [ 106.123492] blk_mq_flush_plug_list+0x7d6/0x81b [ 106.124042] blk_flush_plug_list+0x392/0x3d7 [ 106.124557] blk_finish_plug+0x37/0x4f [ 106.125019] read_pages+0x3ef/0x430 [ 106.125446] __do_page_cache_readahead+0x18e/0x2fc [ 106.126027] force_page_cache_readahead+0x121/0x133 [ 106.126621] page_cache_sync_readahead+0x35f/0x3bb [ 106.127229] generic_file_buffered_read+0x410/0x1860 [ 106.127932] __vfs_read+0x319/0x38f [ 106.128415] vfs_read+0xd2/0x19a [ 
106.128817] ksys_read+0xb9/0x135 [ 106.129225] do_syscall_64+0x140/0x385 [ 106.129684] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 106.130292] [ 106.130292] other info that might help us debug this: [ 106.130292] [ 106.131226] Chain exists of: [ 106.131226] &sbq->ws[i].wait --> &(&hctx->dispatch_wait_lock)->rlock --> &(&sb->map[i].swap_lock)->rlock [ 106.131226] [ 106.132865] Possible interrupt unsafe locking scenario: [ 106.132865] [ 106.133659] CPU0 CPU1 [ 106.134194] ---- ---- [ 106.134733] lock(&(&sb->map[i].swap_lock)->rlock); [ 106.135318] local_irq_disable(); [ 106.136014] lock(&sbq->ws[i].wait); [ 106.136747] lock(&(&hctx->dispatch_wait_lock)->rlock); [ 106.137742] <Interrupt> [ 106.138110] lock(&sbq->ws[i].wait); [ 106.138625] [ 106.138625] *** DEADLOCK *** [ 106.138625] [ 106.139430] 3 locks held by fio/1043: [ 106.139947] #0: 0000000076ff0fd9 (rcu_read_lock){....}, at: hctx_lock+0x29/0xe8 [ 106.140813] #1: 000000002feb1016 (&sbq->ws[i].wait){..-.}, at: blk_mq_dispatch_rq_list+0x4ad/0xd7c [ 106.141877] #2: 000000007eec8b2f (&(&hctx->dispatch_wait_lock)->rlock){....}, at: blk_mq_dispatch_rq_list+0x4c1/0xd7c [ 106.143267] [ 106.143267] the dependencies between SOFTIRQ-irq-safe lock and the holding lock: [ 106.144351] -> (&sbq->ws[i].wait){..-.} ops: 82 { [ 106.144926] IN-SOFTIRQ-W at: [ 106.145314] _raw_spin_lock_irqsave+0x4b/0x82 [ 106.146042] __wake_up_common_lock+0x119/0x1b9 [ 106.146785] sbitmap_queue_wake_up+0x33f/0x383 [ 106.147567] sbitmap_queue_clear+0x4c/0x9a [ 106.148379] __blk_mq_free_request+0x188/0x1d3 [ 106.149148] blk_mq_free_request+0x23b/0x26b [ 106.149864] scsi_end_request+0x345/0x5d7 [ 106.150546] scsi_io_completion+0x4b5/0x8f0 [ 106.151367] scsi_finish_command+0x412/0x456 [ 106.152157] scsi_softirq_done+0x23f/0x29b [ 106.152855] blk_done_softirq+0x2a7/0x2e6 [ 106.153537] __do_softirq+0x360/0x6ad [ 106.154280] run_ksoftirqd+0x2f/0x5b [ 106.155020] smpboot_thread_fn+0x3a5/0x3db [ 106.155828] kthread+0x1d4/0x1e4 [ 106.156526] 
ret_from_fork+0x3a/0x50 [ 106.157267] INITIAL USE at: [ 106.157713] _raw_spin_lock_irqsave+0x4b/0x82 [ 106.158542] prepare_to_wait_exclusive+0xa8/0x215 [ 106.159421] blk_mq_get_tag+0x34f/0x6e4 [ 106.160186] blk_mq_get_request+0x48e/0xaef [ 106.160997] blk_mq_make_request+0x27e/0xbd2 [ 106.161828] generic_make_request+0x4d1/0x873 [ 106.162661] submit_bio+0x20c/0x253 [ 106.163379] mpage_bio_submit+0x44/0x4b [ 106.164142] mpage_readpages+0x3c2/0x407 [ 106.164919] read_pages+0x13a/0x430 [ 106.165633] __do_page_cache_readahead+0x18e/0x2fc [ 106.166530] force_page_cache_readahead+0x121/0x133 [ 106.167439] page_cache_sync_readahead+0x35f/0x3bb [ 106.168337] generic_file_buffered_read+0x410/0x1860 [ 106.169255] __vfs_read+0x319/0x38f [ 106.169977] vfs_read+0xd2/0x19a [ 106.170662] ksys_read+0xb9/0x135 [ 106.171356] do_syscall_64+0x140/0x385 [ 106.172120] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 106.173051] } [ 106.173308] ... key at: [<ffffffff85094600>] __key.26481+0x0/0x40 [ 106.174219] ... 
acquired at: [ 106.174646] _raw_spin_lock+0x33/0x64 [ 106.175183] blk_mq_dispatch_rq_list+0x4c1/0xd7c [ 106.175843] blk_mq_do_dispatch_sched+0x23a/0x287 [ 106.176518] blk_mq_sched_dispatch_requests+0x379/0x3fc [ 106.177262] __blk_mq_run_hw_queue+0x137/0x17e [ 106.177900] __blk_mq_delay_run_hw_queue+0x80/0x25f [ 106.178591] blk_mq_run_hw_queue+0x151/0x187 [ 106.179207] blk_mq_sched_insert_requests+0x13f/0x175 [ 106.179926] blk_mq_flush_plug_list+0x7d6/0x81b [ 106.180571] blk_flush_plug_list+0x392/0x3d7 [ 106.181187] blk_finish_plug+0x37/0x4f [ 106.181737] __se_sys_io_submit+0x171/0x304 [ 106.182346] do_syscall_64+0x140/0x385 [ 106.182895] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 106.183607] [ 106.183830] -> (&(&hctx->dispatch_wait_lock)->rlock){....} ops: 1 { [ 106.184691] INITIAL USE at: [ 106.185119] _raw_spin_lock+0x33/0x64 [ 106.185838] blk_mq_dispatch_rq_list+0x4c1/0xd7c [ 106.186697] blk_mq_do_dispatch_sched+0x23a/0x287 [ 106.187551] blk_mq_sched_dispatch_requests+0x379/0x3fc [ 106.188481] __blk_mq_run_hw_queue+0x137/0x17e [ 106.189307] __blk_mq_delay_run_hw_queue+0x80/0x25f [ 106.190189] blk_mq_run_hw_queue+0x151/0x187 [ 106.190989] blk_mq_sched_insert_requests+0x13f/0x175 [ 106.191902] blk_mq_flush_plug_list+0x7d6/0x81b [ 106.192739] blk_flush_plug_list+0x392/0x3d7 [ 106.193535] blk_finish_plug+0x37/0x4f [ 106.194269] __se_sys_io_submit+0x171/0x304 [ 106.195059] do_syscall_64+0x140/0x385 [ 106.195794] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 106.196705] } [ 106.196950] ... key at: [<ffffffff84880620>] __key.51231+0x0/0x40 [ 106.197853] ... 
acquired at: [ 106.198270] lock_acquire+0x280/0x2f3 [ 106.198806] _raw_spin_lock+0x33/0x64 [ 106.199337] sbitmap_get+0xd5/0x22c [ 106.199850] __sbitmap_queue_get+0xe8/0x177 [ 106.200450] __blk_mq_get_tag+0x1e6/0x22d [ 106.201035] blk_mq_get_tag+0x1db/0x6e4 [ 106.201589] blk_mq_get_driver_tag+0x161/0x258 [ 106.202237] blk_mq_dispatch_rq_list+0x5b9/0xd7c [ 106.202902] blk_mq_do_dispatch_sched+0x23a/0x287 [ 106.203572] blk_mq_sched_dispatch_requests+0x379/0x3fc [ 106.204316] __blk_mq_run_hw_queue+0x137/0x17e [ 106.204956] __blk_mq_delay_run_hw_queue+0x80/0x25f [ 106.205649] blk_mq_run_hw_queue+0x151/0x187 [ 106.206269] blk_mq_sched_insert_requests+0x13f/0x175 [ 106.206997] blk_mq_flush_plug_list+0x7d6/0x81b [ 106.207644] blk_flush_plug_list+0x392/0x3d7 [ 106.208264] blk_finish_plug+0x37/0x4f [ 106.208814] __se_sys_io_submit+0x171/0x304 [ 106.209415] do_syscall_64+0x140/0x385 [ 106.209965] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 106.210684] [ 106.210904] [ 106.210904] the dependencies between the lock to be acquired [ 106.210905] and SOFTIRQ-irq-unsafe lock: [ 106.212541] -> (&(&sb->map[i].swap_lock)->rlock){+.+.} ops: 1969 { [ 106.213393] HARDIRQ-ON-W at: [ 106.213840] _raw_spin_lock+0x33/0x64 [ 106.214570] sbitmap_get+0xd5/0x22c [ 106.215282] __sbitmap_queue_get+0xe8/0x177 [ 106.216086] __blk_mq_get_tag+0x1e6/0x22d [ 106.216876] blk_mq_get_tag+0x1db/0x6e4 [ 106.217627] blk_mq_get_driver_tag+0x161/0x258 [ 106.218465] blk_mq_dispatch_rq_list+0x28e/0xd7c [ 106.219326] blk_mq_do_dispatch_sched+0x23a/0x287 [ 106.220198] blk_mq_sched_dispatch_requests+0x379/0x3fc [ 106.221138] __blk_mq_run_hw_queue+0x137/0x17e [ 106.221975] __blk_mq_delay_run_hw_queue+0x80/0x25f [ 106.222874] blk_mq_run_hw_queue+0x151/0x187 [ 106.223686] blk_mq_sched_insert_requests+0x13f/0x175 [ 106.224597] blk_mq_flush_plug_list+0x7d6/0x81b [ 106.225444] blk_flush_plug_list+0x392/0x3d7 [ 106.226255] blk_finish_plug+0x37/0x4f [ 106.227006] read_pages+0x3ef/0x430 [ 106.227717] 
__do_page_cache_readahead+0x18e/0x2fc [ 106.228595] force_page_cache_readahead+0x121/0x133 [ 106.229491] page_cache_sync_readahead+0x35f/0x3bb [ 106.230373] generic_file_buffered_read+0x410/0x1860 [ 106.231277] __vfs_read+0x319/0x38f [ 106.231986] vfs_read+0xd2/0x19a [ 106.232666] ksys_read+0xb9/0x135 [ 106.233350] do_syscall_64+0x140/0x385 [ 106.234097] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 106.235012] SOFTIRQ-ON-W at: [ 106.235460] _raw_spin_lock+0x33/0x64 [ 106.236195] sbitmap_get+0xd5/0x22c [ 106.236913] __sbitmap_queue_get+0xe8/0x177 [ 106.237715] __blk_mq_get_tag+0x1e6/0x22d [ 106.238488] blk_mq_get_tag+0x1db/0x6e4 [ 106.239244] blk_mq_get_driver_tag+0x161/0x258 [ 106.240079] blk_mq_dispatch_rq_list+0x28e/0xd7c [ 106.240937] blk_mq_do_dispatch_sched+0x23a/0x287 [ 106.241806] blk_mq_sched_dispatch_requests+0x379/0x3fc [ 106.242751] __blk_mq_run_hw_queue+0x137/0x17e [ 106.243579] __blk_mq_delay_run_hw_queue+0x80/0x25f [ 106.244469] blk_mq_run_hw_queue+0x151/0x187 [ 106.245277] blk_mq_sched_insert_requests+0x13f/0x175 [ 106.246191] blk_mq_flush_plug_list+0x7d6/0x81b [ 106.247044] blk_flush_plug_list+0x392/0x3d7 [ 106.247859] blk_finish_plug+0x37/0x4f [ 106.248749] read_pages+0x3ef/0x430 [ 106.249463] __do_page_cache_readahead+0x18e/0x2fc [ 106.250357] force_page_cache_readahead+0x121/0x133 [ 106.251263] page_cache_sync_readahead+0x35f/0x3bb [ 106.252157] generic_file_buffered_read+0x410/0x1860 [ 106.253084] __vfs_read+0x319/0x38f [ 106.253808] vfs_read+0xd2/0x19a [ 106.254488] ksys_read+0xb9/0x135 [ 106.255186] do_syscall_64+0x140/0x385 [ 106.255943] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 106.256867] INITIAL USE at: [ 106.257300] _raw_spin_lock+0x33/0x64 [ 106.258033] sbitmap_get+0xd5/0x22c [ 106.258747] __sbitmap_queue_get+0xe8/0x177 [ 106.259542] __blk_mq_get_tag+0x1e6/0x22d [ 106.260320] blk_mq_get_tag+0x1db/0x6e4 [ 106.261072] blk_mq_get_driver_tag+0x161/0x258 [ 106.261902] blk_mq_dispatch_rq_list+0x28e/0xd7c [ 106.262762] 
blk_mq_do_dispatch_sched+0x23a/0x287 [ 106.263626] blk_mq_sched_dispatch_requests+0x379/0x3fc [ 106.264571] __blk_mq_run_hw_queue+0x137/0x17e [ 106.265409] __blk_mq_delay_run_hw_queue+0x80/0x25f [ 106.266302] blk_mq_run_hw_queue+0x151/0x187 [ 106.267111] blk_mq_sched_insert_requests+0x13f/0x175 [ 106.268028] blk_mq_flush_plug_list+0x7d6/0x81b [ 106.268878] blk_flush_plug_list+0x392/0x3d7 [ 106.269694] blk_finish_plug+0x37/0x4f [ 106.270432] read_pages+0x3ef/0x430 [ 106.271139] __do_page_cache_readahead+0x18e/0x2fc [ 106.272040] force_page_cache_readahead+0x121/0x133 [ 106.272932] page_cache_sync_readahead+0x35f/0x3bb [ 106.273811] generic_file_buffered_read+0x410/0x1860 [ 106.274709] __vfs_read+0x319/0x38f [ 106.275407] vfs_read+0xd2/0x19a [ 106.276074] ksys_read+0xb9/0x135 [ 106.276764] do_syscall_64+0x140/0x385 [ 106.277500] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 106.278417] } [ 106.278676] ... key at: [<ffffffff85094640>] __key.26212+0x0/0x40 [ 106.279586] ... acquired at: [ 106.280026] lock_acquire+0x280/0x2f3 [ 106.280559] _raw_spin_lock+0x33/0x64 [ 106.281101] sbitmap_get+0xd5/0x22c [ 106.281610] __sbitmap_queue_get+0xe8/0x177 [ 106.282221] __blk_mq_get_tag+0x1e6/0x22d [ 106.282809] blk_mq_get_tag+0x1db/0x6e4 [ 106.283368] blk_mq_get_driver_tag+0x161/0x258 [ 106.284018] blk_mq_dispatch_rq_list+0x5b9/0xd7c [ 106.284685] blk_mq_do_dispatch_sched+0x23a/0x287 [ 106.285371] blk_mq_sched_dispatch_requests+0x379/0x3fc [ 106.286135] __blk_mq_run_hw_queue+0x137/0x17e [ 106.286806] __blk_mq_delay_run_hw_queue+0x80/0x25f [ 106.287515] blk_mq_run_hw_queue+0x151/0x187 [ 106.288149] blk_mq_sched_insert_requests+0x13f/0x175 [ 106.289041] blk_mq_flush_plug_list+0x7d6/0x81b [ 106.289912] blk_flush_plug_list+0x392/0x3d7 [ 106.290590] blk_finish_plug+0x37/0x4f [ 106.291238] __se_sys_io_submit+0x171/0x304 [ 106.291864] do_syscall_64+0x140/0x385 [ 106.292534] entry_SYSCALL_64_after_hwframe+0x49/0xbe Reported-by: Ming Lei <ming.lei@redhat.com> Tested-by: Guenter Roeck 
<linux@roeck-us.net> Signed-off-by: Jens Axboe <axboe@kernel.dk>
688 lines
16 KiB
C
688 lines
16 KiB
C
/*
|
|
* Copyright (C) 2016 Facebook
|
|
* Copyright (C) 2013-2014 Jens Axboe
|
|
*
|
|
* This program is free software; you can redistribute it and/or
|
|
* modify it under the terms of the GNU General Public
|
|
* License v2 as published by the Free Software Foundation.
|
|
*
|
|
* This program is distributed in the hope that it will be useful,
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
* General Public License for more details.
|
|
*
|
|
* You should have received a copy of the GNU General Public License
|
|
* along with this program. If not, see <https://www.gnu.org/licenses/>.
|
|
*/
|
|
|
|
#include <linux/sched.h>
|
|
#include <linux/random.h>
|
|
#include <linux/sbitmap.h>
|
|
#include <linux/seq_file.h>
|
|
|
|
/*
 * Initialize an sbitmap on the given NUMA node.
 *
 * @sb:    uninitialized sbitmap to set up
 * @depth: total number of usable bits
 * @shift: log2 of bits per word, or < 0 to let us pick a sensible value
 * @flags: allocation flags for the map array
 * @node:  NUMA node to allocate the map array on
 *
 * Returns 0 on success, -EINVAL for an oversized shift, -ENOMEM on
 * allocation failure. A depth of 0 is valid and leaves sb->map NULL.
 */
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node)
{
	unsigned int bits_per_word;
	unsigned int i;

	if (shift < 0) {
		/* Default: one full machine word per map entry. */
		shift = ilog2(BITS_PER_LONG);
		/*
		 * If the bitmap is small, shrink the number of bits per word so
		 * we spread over a few cachelines, at least. If less than 4
		 * bits, just forget about it, it's not going to work optimally
		 * anyway.
		 */
		if (depth >= 4) {
			while ((4U << shift) > depth)
				shift--;
		}
	}
	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
	if (!sb->map)
		return -ENOMEM;

	/*
	 * Distribute 'depth' over the words; every word gets bits_per_word
	 * bits except possibly a partial final word.
	 */
	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
		spin_lock_init(&sb->map[i].swap_lock);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);
|
|
|
|
void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
|
|
{
|
|
unsigned int bits_per_word = 1U << sb->shift;
|
|
unsigned int i;
|
|
|
|
sb->depth = depth;
|
|
sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
|
|
|
|
for (i = 0; i < sb->map_nr; i++) {
|
|
sb->map[i].depth = min(depth, bits_per_word);
|
|
depth -= sb->map[i].depth;
|
|
}
|
|
}
|
|
EXPORT_SYMBOL_GPL(sbitmap_resize);
|
|
|
|
/*
 * Try to claim a free bit in a single word.
 *
 * @word:  the word to search
 * @depth: number of usable bits in this word
 * @hint:  bit offset to start searching from
 * @wrap:  if true, restart the search from bit 0 once the end is reached
 *
 * Returns the claimed bit number within the word, or -1 if no free bit
 * could be claimed. The claim itself is the atomic test_and_set_bit_lock();
 * losing that race just advances the hint and retries.
 */
static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	unsigned int orig_hint = hint;
	int nr;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (orig_hint && hint && wrap) {
				hint = orig_hint = 0;
				continue;
			}
			return -1;
		}

		/* Atomically claim the bit; on success we own it. */
		if (!test_and_set_bit_lock(nr, word))
			break;

		/* Lost the race to another allocator: resume after 'nr'. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}
|
|
|
|
/*
 * See if we have deferred clears that we can batch move
 *
 * Moves the bits accumulated in sb->map[index].cleared into the free word,
 * making them allocatable again. Returns true if any bits were moved.
 *
 * The swap_lock serializes concurrent swappers; the cmpxchg() loops make
 * each of the two updates (zeroing ->cleared, clearing the masked bits in
 * ->word) atomic against concurrent bit setters/clearers.
 */
static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
{
	unsigned long mask, val;
	unsigned long __maybe_unused flags;
	bool ret = false;

	/* Silence bogus lockdep warning */
	/*
	 * swap_lock is only ever taken from process context, but one of
	 * those contexts holds an irq-safe waitqueue lock, which lockdep
	 * flags as an inconsistency. Disabling irqs here (lockdep builds
	 * only) keeps the usage consistent without taxing production.
	 */
#if defined(CONFIG_LOCKDEP)
	local_irq_save(flags);
#endif
	spin_lock(&sb->map[index].swap_lock);

	if (!sb->map[index].cleared)
		goto out_unlock;

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	do {
		mask = sb->map[index].cleared;
	} while (cmpxchg(&sb->map[index].cleared, mask, 0) != mask);

	/*
	 * Now clear the masked bits in our free word
	 */
	do {
		val = sb->map[index].word;
	} while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val);

	ret = true;
out_unlock:
	spin_unlock(&sb->map[index].swap_lock);
#if defined(CONFIG_LOCKDEP)
	local_irq_restore(flags);
#endif
	return ret;
}
|
|
|
|
/*
 * Allocate a bit from the word at @index, retrying after folding in any
 * deferred clears. Returns the bit number within the word, or -1 if the
 * word is exhausted and there were no deferred clears to reclaim.
 */
static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
				     unsigned int alloc_hint, bool round_robin)
{
	for (;;) {
		int bit = __sbitmap_get_word(&sb->map[index].word,
					     sb->map[index].depth, alloc_hint,
					     !round_robin);

		if (bit != -1)
			return bit;
		/* Word looked full: reclaim deferred clears and retry once more. */
		if (!sbitmap_deferred_clear(sb, index))
			return -1;
	}
}
|
|
|
|
/*
 * Allocate a free bit from the sbitmap.
 *
 * @sb:          the bitmap
 * @alloc_hint:  global bit number to start searching from
 * @round_robin: allocate strictly after the hint (fair/ordered) instead of
 *               anywhere in the hinted word
 *
 * Returns the allocated global bit number, or -1 if the map is full.
 */
int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	/* Visit each word at most once, starting at the hinted index. */
	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_index(sb, index, alloc_hint,
					       round_robin);
		if (nr != -1) {
			/* Translate the in-word bit to a global bit number. */
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);
|
|
|
|
/*
 * Like sbitmap_get(), but limit how deep into each word we allocate.
 *
 * @sb:            the bitmap
 * @alloc_hint:    global bit number to start searching from
 * @shallow_depth: cap on usable bits per word, throttling allocation
 *
 * Returns the allocated global bit number, or -1 if nothing was available
 * within the shallow limit. Note this path does not fold deferred clears.
 */
int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
			unsigned long shallow_depth)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	for (i = 0; i < sb->map_nr; i++) {
		nr = __sbitmap_get_word(&sb->map[index].word,
					min(sb->map[index].depth, shallow_depth),
					SB_NR_TO_BIT(sb, alloc_hint), true);
		if (nr != -1) {
			/* Translate the in-word bit to a global bit number. */
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		index++;
		alloc_hint = index << sb->shift;

		if (index >= sb->map_nr) {
			index = 0;
			alloc_hint = 0;
		}
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);
|
|
|
|
bool sbitmap_any_bit_set(const struct sbitmap *sb)
|
|
{
|
|
unsigned int i;
|
|
|
|
for (i = 0; i < sb->map_nr; i++) {
|
|
if (sb->map[i].word)
|
|
return true;
|
|
}
|
|
return false;
|
|
}
|
|
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);
|
|
|
|
bool sbitmap_any_bit_clear(const struct sbitmap *sb)
|
|
{
|
|
unsigned int i;
|
|
|
|
for (i = 0; i < sb->map_nr; i++) {
|
|
const struct sbitmap_word *word = &sb->map[i];
|
|
unsigned long ret;
|
|
|
|
ret = find_first_zero_bit(&word->word, word->depth);
|
|
if (ret < word->depth)
|
|
return true;
|
|
}
|
|
return false;
|
|
}
|
|
EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear);
|
|
|
|
/*
 * Count bits across all words: the allocated bits (@set == true) or the
 * deferred-cleared bits (@set == false).
 */
static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
	unsigned int total = 0;
	unsigned int idx;

	for (idx = 0; idx < sb->map_nr; idx++) {
		const struct sbitmap_word *map = &sb->map[idx];
		const unsigned long *bits = set ? &map->word : &map->cleared;

		total += bitmap_weight(bits, map->depth);
	}
	return total;
}
|
|
|
|
/* Number of set bits in ->word across the map (includes deferred clears). */
static unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, true);
}
|
|
|
|
/* Number of deferred-cleared bits (->cleared) across the map. */
static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, false);
}
|
|
|
|
/*
 * Dump sbitmap statistics to a seq_file (debugfs helper).
 * "busy" subtracts deferred clears from the raw weight, since those bits
 * are set in ->word but no longer in use.
 */
void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb) - sbitmap_cleared(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);
|
|
|
|
/*
 * Emit one byte of the hex dump: start a new "%08x:" prefixed row every
 * 16 bytes, and separate every 2-byte group with a space.
 */
static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	bool new_row = !(offset & 0xf);

	if (new_row) {
		if (offset)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if (!(offset & 0x1))
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}
|
|
|
|
/*
 * Hex-dump the bitmap contents to a seq_file (debugfs helper).
 *
 * Words may hold fewer than BITS_PER_LONG usable bits (partial last word),
 * so bits are repacked into a contiguous byte stream before printing.
 * READ_ONCE() gives a tear-free snapshot of each word, but the dump as a
 * whole is not atomic with respect to concurrent allocation.
 */
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned int word_bits = READ_ONCE(sb->map[i].depth);

		while (word_bits > 0) {
			/* Take as many bits as fit into the current byte. */
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}
	/* Flush any partially-filled trailing byte. */
	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);
|
|
|
|
/*
 * Compute the wake batch: how many frees must accumulate before we wake
 * one wait queue. Clamped to [1, SBQ_WAKE_BATCH] and scaled so that a
 * full (possibly shallow-limited) map can wake all SBQ_WAIT_QUEUES.
 */
static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap,
	 * potentially limited by a shallow depth, is enough to wake up all of
	 * the queues.
	 *
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}
|
|
|
|
/*
 * Initialize an sbitmap_queue: the underlying sbitmap, per-cpu allocation
 * hints, and the SBQ_WAIT_QUEUES wait queues used to throttle allocators.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated so far is freed before returning.
 */
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
	if (ret)
		return ret;

	sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sbq->alloc_hint) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	/*
	 * Scatter the initial hints so CPUs don't all contend on the
	 * same word; pointless for round-robin, which ignores locality.
	 */
	if (depth && !round_robin) {
		for_each_possible_cpu(i)
			*per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
	}

	/* Must be set before sbq_calc_wake_batch() reads it. */
	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		free_percpu(sbq->alloc_hint);
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		init_waitqueue_head(&sbq->ws[i].wait);
		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
	}

	sbq->round_robin = round_robin;
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
|
|
|
|
/*
 * Recompute the wake batch for a new depth and, if it changed, reset every
 * wait queue's count to 1 so pending waiters are re-evaluated promptly
 * under the new batch size.
 */
static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
	int i;

	if (sbq->wake_batch != wake_batch) {
		WRITE_ONCE(sbq->wake_batch, wake_batch);
		/*
		 * Pairs with the memory barrier in sbitmap_queue_wake_up()
		 * to ensure that the batch size is updated before the wait
		 * counts.
		 */
		smp_mb__before_atomic();
		for (i = 0; i < SBQ_WAIT_QUEUES; i++)
			atomic_set(&sbq->ws[i].wait_cnt, 1);
	}
}
|
|
|
|
/*
 * Resize an sbitmap_queue. The wake batch is updated first so waiters see
 * a batch consistent with (or smaller than) the new depth before the map
 * itself changes size.
 */
void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);
|
|
|
|
/*
 * Allocate a bit from the queue's sbitmap using this CPU's cached hint.
 * Returns the allocated bit, or -1 if the map is full. The per-cpu hint
 * is refreshed based on the outcome so future allocations stay local.
 */
int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	unsigned int hint, depth;
	int nr;

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	/* Stale hint after a resize: pick a fresh random starting point. */
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
|
|
|
|
/*
 * Like __sbitmap_queue_get(), but restrict allocation to @shallow_depth
 * bits per word. Callers must have set min_shallow_depth at least this
 * low (see sbitmap_queue_min_shallow_depth()), otherwise the wake batch
 * may be too large and waiters can stall — hence the WARN.
 */
int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
				unsigned int shallow_depth)
{
	unsigned int hint, depth;
	int nr;

	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	/* Stale hint after a resize: pick a fresh random starting point. */
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);
|
|
|
|
/*
 * Record the smallest shallow depth any caller will use, and recompute the
 * wake batch against it so batched wakeups cannot exceed what a shallow-
 * limited map can satisfy.
 */
void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth)
{
	sbq->min_shallow_depth = min_shallow_depth;
	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);
|
|
|
|
/*
 * Pick the next wait queue to wake: scan the SBQ_WAIT_QUEUES entries
 * starting at wake_index and return the first with active waiters,
 * advancing wake_index to it. Returns NULL if no waiters are accounted.
 */
static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/* Fast path: nobody is waiting at all. */
	if (!atomic_read(&sbq->ws_active))
		return NULL;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait)) {
			int o = atomic_read(&sbq->wake_index);

			/*
			 * Best-effort update; losing the cmpxchg race only
			 * means another waker advanced it, which is fine.
			 */
			if (wake_index != o)
				atomic_cmpxchg(&sbq->wake_index, o, wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}
|
|
|
|
/*
 * Consume one unit of a wait queue's batch count and, when the batch is
 * exhausted, wake wake_batch waiters and rearm the count.
 *
 * Returns true if the caller should retry on another wait queue (it lost
 * the rearm race), false when there is nothing further to do.
 */
static bool __sbq_wake_up(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws;
	unsigned int wake_batch;
	int wait_cnt;

	ws = sbq_wake_ptr(sbq);
	if (!ws)
		return false;

	wait_cnt = atomic_dec_return(&ws->wait_cnt);
	if (wait_cnt <= 0) {
		int ret;

		wake_batch = READ_ONCE(sbq->wake_batch);

		/*
		 * Pairs with the memory barrier in sbitmap_queue_resize() to
		 * ensure that we see the batch size update before the wait
		 * count is reset.
		 */
		smp_mb__before_atomic();

		/*
		 * For concurrent callers of this, the one that failed the
		 * atomic_cmpxchg() race should call this function again
		 * to wakeup a new batch on a different 'ws'.
		 */
		ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
		if (ret == wait_cnt) {
			/* We won the rearm: advance rotation and wake a batch. */
			sbq_index_atomic_inc(&sbq->wake_index);
			wake_up_nr(&ws->wait, wake_batch);
			return false;
		}

		return true;
	}

	return false;
}
|
|
|
|
/*
 * Wake waiters after a bit was freed. Loops because __sbq_wake_up()
 * may ask to retry against a different wait queue after losing a
 * rearm race.
 */
void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
{
	while (__sbq_wake_up(sbq))
		;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
|
|
|
|
/*
 * Free bit @nr back to the queue's sbitmap (via the deferred-clear mask),
 * wake any throttled allocators, and seed @cpu's allocation hint with the
 * freed bit so it can be reused locally.
 */
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	sbitmap_deferred_clear_bit(&sbq->sb, nr);

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
	 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
	 * waiter. See the comment on waitqueue_active().
	 */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);

	/* Round-robin must not reuse locally; also skip out-of-range bits. */
	if (likely(!sbq->round_robin && nr < sbq->sb.depth))
		*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
|
|
|
|
/*
 * Wake every waiter on every wait queue, regardless of batch counts.
 * Used when all waiters must re-evaluate (e.g. on teardown or resize).
 */
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbitmap_queue_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);
|
|
|
|
/*
 * Dump sbitmap_queue state to a seq_file (debugfs helper): the underlying
 * sbitmap stats, per-cpu allocation hints, wake bookkeeping, and each
 * wait queue's count/activity. All reads are unlocked snapshots.
 */
void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
	bool first;
	int i;

	sbitmap_show(&sbq->sb, m);

	seq_puts(m, "alloc_hint={");
	first = true;
	for_each_possible_cpu(i) {
		if (!first)
			seq_puts(m, ", ");
		first = false;
		seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i));
	}
	seq_puts(m, "}\n");

	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

	seq_puts(m, "ws={\n");
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[i];

		seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
			   atomic_read(&ws->wait_cnt),
			   waitqueue_active(&ws->wait) ? "active" : "inactive");
	}
	seq_puts(m, "}\n");

	seq_printf(m, "round_robin=%d\n", sbq->round_robin);
	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);
|
|
|
|
/*
 * Account this waiter in ws_active (once per sbq_wait) and then queue it
 * exclusively on @ws. The accounting must precede prepare_to_wait so a
 * concurrent waker's ws_active check in sbq_wake_ptr() sees us.
 */
void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	if (!sbq_wait->accounted) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->accounted = 1;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);
|
|
|
|
/*
 * Counterpart of sbitmap_prepare_to_wait(): dequeue the waiter and drop
 * its ws_active accounting if it was counted.
 */
void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->accounted) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->accounted = 0;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);
|