Kernel Concurrency Sanitizer (KCSAN) updates for v6.13

- Fixes to make KCSAN compatible with PREEMPT_RT
 
 - Minor cleanups
 
 All changes have been in linux-next for the past 4 weeks.
 -----BEGIN PGP SIGNATURE-----
 
 iIcEABYIAC8WIQR7t4b/75lzOR3l5rcxsLN3bbyLnwUCZzMoFREcZWx2ZXJAZ29v
 Z2xlLmNvbQAKCRAxsLN3bbyLn6cVAP4l4IzMyRm+kAW8yqnMjfZBl2+cJ15J5Huy
 jQLqPSdruwD/W8ciiJvz9FhKtQQwVXtZF3WcNdkNgGLqhHbEkPBw4gA=
 =Lx19
 -----END PGP SIGNATURE-----

Merge tag 'kcsan-20241112-v6.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/melver/linux

Pull Kernel Concurrency Sanitizer (KCSAN) updates from Marco Elver:

 - Make KCSAN compatible with PREEMPT_RT

 - Minor cleanup

* tag 'kcsan-20241112-v6.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/melver/linux:
  kcsan: Remove redundant call of kallsyms_lookup_name()
  kcsan: Turn report_filterlist_lock into a raw_spinlock
commit 769ca7d4d2
Author: Linus Torvalds
Date:   2024-11-19 11:44:17 -08:00
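For background: on PREEMPT_RT, spinlock_t becomes a sleeping rt_mutex-based lock, so it must not be taken from non-preemptible context, while raw_spinlock_t always busy-waits. KCSAN consults the report filterlist from whatever context a report fires in, hence the conversion below. Dropping the struct initializer is also safe because static storage is zero-initialized (.addrs == NULL, .used == 0, .whitelist == false), and the new code now sizes the list lazily. A minimal sketch of the raw flavor, with hypothetical names (example_raw_lock, example_report_path), not code from the patch:

  #include <linux/spinlock.h>

  /* Hypothetical lock for illustration; not part of the patch. */
  static DEFINE_RAW_SPINLOCK(example_raw_lock);

  static void example_report_path(void)
  {
          unsigned long flags;

          /*
           * A raw spinlock never sleeps, so this is valid from any context
           * on both PREEMPT_RT and !PREEMPT_RT kernels. The critical
           * section must stay short and must not allocate or sleep.
           */
          raw_spin_lock_irqsave(&example_raw_lock, flags);
          /* ... short, non-sleeping critical section ... */
          raw_spin_unlock_irqrestore(&example_raw_lock, flags);
  }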

@@ -46,14 +46,8 @@ static struct {
 	int		used;		/* number of elements used */
 	bool		sorted;		/* if elements are sorted */
 	bool		whitelist;	/* if list is a blacklist or whitelist */
-} report_filterlist = {
-	.addrs		= NULL,
-	.size		= 8,		/* small initial size */
-	.used		= 0,
-	.sorted		= false,
-	.whitelist	= false,	/* default is blacklist */
-};
-static DEFINE_SPINLOCK(report_filterlist_lock);
+} report_filterlist;
+static DEFINE_RAW_SPINLOCK(report_filterlist_lock);
 
 /*
  * The microbenchmark allows benchmarking KCSAN core runtime only. To run
@@ -110,7 +104,7 @@ bool kcsan_skip_report_debugfs(unsigned long func_addr)
 		return false;
 	func_addr -= offset; /* Get function start */
 
-	spin_lock_irqsave(&report_filterlist_lock, flags);
+	raw_spin_lock_irqsave(&report_filterlist_lock, flags);
 	if (report_filterlist.used == 0)
 		goto out;
 
@@ -127,7 +121,7 @@ bool kcsan_skip_report_debugfs(unsigned long func_addr)
 		ret = !ret;
 
 out:
-	spin_unlock_irqrestore(&report_filterlist_lock, flags);
+	raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
 	return ret;
 }
 
@@ -135,9 +129,9 @@ static void set_report_filterlist_whitelist(bool whitelist)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&report_filterlist_lock, flags);
+	raw_spin_lock_irqsave(&report_filterlist_lock, flags);
 	report_filterlist.whitelist = whitelist;
-	spin_unlock_irqrestore(&report_filterlist_lock, flags);
+	raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
 }
 
 /* Returns 0 on success, error-code otherwise. */
@@ -145,6 +139,9 @@ static ssize_t insert_report_filterlist(const char *func)
 {
 	unsigned long flags;
 	unsigned long addr = kallsyms_lookup_name(func);
+	unsigned long *delay_free = NULL;
+	unsigned long *new_addrs = NULL;
+	size_t new_size = 0;
 	ssize_t ret = 0;
 
 	if (!addr) {
@@ -152,42 +149,42 @@ static ssize_t insert_report_filterlist(const char *func)
 		return -ENOENT;
 	}
 
-	spin_lock_irqsave(&report_filterlist_lock, flags);
-
-	if (report_filterlist.addrs == NULL) {
-		/* initial allocation */
-		report_filterlist.addrs =
-			kmalloc_array(report_filterlist.size,
-				      sizeof(unsigned long), GFP_ATOMIC);
-		if (report_filterlist.addrs == NULL) {
-			ret = -ENOMEM;
-			goto out;
-		}
-	} else if (report_filterlist.used == report_filterlist.size) {
-		/* resize filterlist */
-		size_t new_size = report_filterlist.size * 2;
-		unsigned long *new_addrs =
-			krealloc(report_filterlist.addrs,
-				 new_size * sizeof(unsigned long), GFP_ATOMIC);
-
-		if (new_addrs == NULL) {
-			/* leave filterlist itself untouched */
-			ret = -ENOMEM;
-			goto out;
+retry_alloc:
+	/*
+	 * Check if we need an allocation, and re-validate under the lock. Since
+	 * the report_filterlist_lock is a raw spinlock, we cannot allocate under the lock.
+	 */
+	if (data_race(report_filterlist.used == report_filterlist.size)) {
+		new_size = (report_filterlist.size ?: 4) * 2;
+		delay_free = new_addrs = kmalloc_array(new_size, sizeof(unsigned long), GFP_KERNEL);
+		if (!new_addrs)
+			return -ENOMEM;
+	}
+
+	raw_spin_lock_irqsave(&report_filterlist_lock, flags);
+	if (report_filterlist.used == report_filterlist.size) {
+		/* Check we pre-allocated enough, and retry if not. */
+		if (report_filterlist.used >= new_size) {
+			raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
+			kfree(new_addrs); /* kfree(NULL) is safe */
+			delay_free = new_addrs = NULL;
+			goto retry_alloc;
 		}
 
-		report_filterlist.size = new_size;
-		report_filterlist.addrs = new_addrs;
+		if (report_filterlist.used)
+			memcpy(new_addrs, report_filterlist.addrs, report_filterlist.used * sizeof(unsigned long));
+		delay_free = report_filterlist.addrs; /* free the old list */
+		report_filterlist.addrs = new_addrs;  /* switch to the new list */
+		report_filterlist.size = new_size;
 	}
 
 	/* Note: deduplicating should be done in userspace. */
-	report_filterlist.addrs[report_filterlist.used++] =
-		kallsyms_lookup_name(func);
+	report_filterlist.addrs[report_filterlist.used++] = addr;
 	report_filterlist.sorted = false;
 
-out:
-	spin_unlock_irqrestore(&report_filterlist_lock, flags);
+	raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
 
+	kfree(delay_free);
 	return ret;
 }
 
@@ -204,13 +201,13 @@ static int show_info(struct seq_file *file, void *v)
 	}
 
 	/* show filter functions, and filter type */
-	spin_lock_irqsave(&report_filterlist_lock, flags);
+	raw_spin_lock_irqsave(&report_filterlist_lock, flags);
 	seq_printf(file, "\n%s functions: %s\n",
 		   report_filterlist.whitelist ? "whitelisted" : "blacklisted",
 		   report_filterlist.used == 0 ? "none" : "");
 	for (i = 0; i < report_filterlist.used; ++i)
 		seq_printf(file, " %ps\n", (void *)report_filterlist.addrs[i]);
-	spin_unlock_irqrestore(&report_filterlist_lock, flags);
+	raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
 
 	return 0;
 }
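The rewritten insert_report_filterlist() above shows the usual pattern once a lock becomes raw: a GFP_KERNEL allocation may sleep, so memory is pre-allocated before taking the lock, the capacity check is re-validated under the lock (the unlocked peek is wrapped in data_race() because it is only a hint), and both the old array and any unused allocation are freed only after unlocking. A rough userspace analogue of that pattern, a sketch using a pthread mutex in place of the raw spinlock and illustrative names (list, list_insert; portable C11 would want atomics for the unlocked peek):

  #include <pthread.h>
  #include <stdlib.h>
  #include <string.h>

  static struct {
          unsigned long *addrs;
          size_t size;
          size_t used;
  } list;
  static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

  static int list_insert(unsigned long addr)
  {
          unsigned long *new_addrs = NULL, *delay_free = NULL;
          size_t new_size = 0;

  retry_alloc:
          /* Unsynchronized peek, only a hint; re-validated under the lock. */
          if (list.used == list.size) {
                  new_size = (list.size ? list.size : 4) * 2;
                  delay_free = new_addrs = malloc(new_size * sizeof(*new_addrs));
                  if (!new_addrs)
                          return -1;
          }

          pthread_mutex_lock(&list_lock);
          if (list.used == list.size) {
                  /* Lost a race: the pre-allocation is too small, retry. */
                  if (list.used >= new_size) {
                          pthread_mutex_unlock(&list_lock);
                          free(new_addrs); /* free(NULL) is safe */
                          delay_free = new_addrs = NULL;
                          goto retry_alloc;
                  }
                  if (list.used)
                          memcpy(new_addrs, list.addrs, list.used * sizeof(*new_addrs));
                  delay_free = list.addrs; /* defer freeing the old array */
                  list.addrs = new_addrs;
                  list.size = new_size;
          }
          list.addrs[list.used++] = addr;
          pthread_mutex_unlock(&list_lock);

          free(delay_free); /* never free while holding the lock */
          return 0;
  }

As in the kernel version, every free is delayed past the unlock so the critical section stays short and never calls into the allocator.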