On systems with KPTI enabled, we can currently observe the following
warning:

  BUG: using smp_processor_id() in preemptible
  caller is invalidate_user_asid+0x13/0x50
  CPU: 6 PID: 1075 Comm: dmesg Not tainted 5.12.0-rc4-gda4a2b1a5479-kfence_1+ #1
  Hardware name: Hewlett-Packard HP Pro 3500 Series/2ABF, BIOS 8.11 10/24/2012
  Call Trace:
   dump_stack+0x7f/0xad
   check_preemption_disabled+0xc8/0xd0
   invalidate_user_asid+0x13/0x50
   flush_tlb_one_kernel+0x5/0x20
   kfence_protect+0x56/0x80
   ...

While it normally makes sense to require preemption to be off, so that
the expected CPU's TLB is flushed and not another, in our case it really
is best-effort (see comments in kfence_protect_page()).

Avoid the warning by disabling preemption around flush_tlb_one_kernel().

Link: https://lore.kernel.org/lkml/YGIDBAboELGgMgXy@elver.google.com/
Link: https://lkml.kernel.org/r/20210330065737.652669-1-elver@google.com
Signed-off-by: Marco Elver <elver@google.com>
Reported-by: Tomi Sarvela <tomi.p.sarvela@intel.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrey Konovalov <andreyknvl@google.com>
Cc: Jann Horn <jannh@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
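The change itself is small and lives in kfence_protect_page() in the header
below: the local TLB flush, which the surrounding comment already declares
best-effort, is wrapped in a preempt_disable()/preempt_enable() pair so that
the per-CPU work done by the KPTI path (invalidate_user_asid(), as seen in the
trace) no longer runs in preemptible context. In isolation, the affected lines
are:

	/*
	 * Flush this CPU's TLB, assuming whoever did the allocation/free is
	 * likely to continue running on this CPU.
	 */
	preempt_disable();
	flush_tlb_one_kernel(addr);
	preempt_enable();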
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * x86 KFENCE support.
 *
 * Copyright (C) 2020, Google LLC.
 */

#ifndef _ASM_X86_KFENCE_H
#define _ASM_X86_KFENCE_H

#include <linux/bug.h>
#include <linux/kfence.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>

/* Force 4K pages for __kfence_pool. */
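/*
 * KFENCE toggles the protection of individual pages in the pool (see
 * kfence_protect_page() below), which requires PTE-level mappings;
 * set_memory_4k() splits any huge pages covering the pool.
 */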
static inline bool arch_kfence_init_pool(void)
{
	unsigned long addr;

	for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
	     addr += PAGE_SIZE) {
		unsigned int level;

		if (!lookup_address(addr, &level))
			return false;

		if (level != PG_LEVEL_4K)
			set_memory_4k(addr, 1);
	}

	return true;
}

/* Protect the given page and flush TLB. */
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	unsigned int level;
	pte_t *pte = lookup_address(addr, &level);

	if (WARN_ON(!pte || level != PG_LEVEL_4K))
		return false;

	/*
	 * We need to avoid IPIs, as we may get KFENCE allocations or faults
	 * with interrupts disabled. Therefore, the below is best-effort, and
	 * does not flush TLBs on all CPUs. We can tolerate some inaccuracy;
	 * lazy fault handling takes care of faults after the page is PRESENT.
	 */

	if (protect)
		set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
	else
		set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));

	/*
	 * Flush this CPU's TLB, assuming whoever did the allocation/free is
	 * likely to continue running on this CPU.
	 */
	preempt_disable();
	flush_tlb_one_kernel(addr);
	preempt_enable();
	return true;
}

#endif /* _ASM_X86_KFENCE_H */
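For context on how this header is used: the arch hook kfence_protect_page() is
not called directly by allocators; the KFENCE core wraps it, which is where the
kfence_protect() frame in the trace above comes from. A minimal sketch of that
calling side, simplified from mm/kfence/core.c (the wrapper names match the
trace, but the bodies here are illustrative rather than verbatim):

/* Simplified sketch of the core-side wrappers around the arch hook. */
static bool kfence_protect(unsigned long addr)
{
	/* Page-align the address and make the page inaccessible. */
	return kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true);
}

static bool kfence_unprotect(unsigned long addr)
{
	/* Restore _PAGE_PRESENT so accesses to the page no longer fault. */
	return kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false);
}

Because these wrappers can run on allocation and free paths with interrupts
disabled, the arch code cannot issue IPI-based flushes to all CPUs; the flush
stays local and best-effort, and bracketing it with preempt_disable()/
preempt_enable() is all that is needed to avoid the DEBUG_PREEMPT warning.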