565474afe0

Add a readable attribute in debugfs to trigger a W^X pages check at any time.
To trigger the test, just read /sys/kernel/debug/check_wx_pages.  It reports
FAILED if the check failed and SUCCESS otherwise; detailed results are
provided in dmesg.

Link: https://lkml.kernel.org/r/e947fb1a9f3f5466344823e532d343ff194ae03d.1706610398.git.christophe.leroy@csgroup.eu
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexandre Ghiti <alexghiti@rivosinc.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: "Aneesh Kumar K.V (IBM)" <aneesh.kumar@kernel.org>
Cc: Borislav Petkov (AMD) <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Greg KH <greg@kroah.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: "Naveen N. Rao" <naveen.n.rao@linux.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Phong Tran <tranmanphong@gmail.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Steven Price <steven.price@arm.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
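Reading the attribute is all it takes to trigger the check from userspace. Below is a minimal, hypothetical userspace sketch (not part of this file); it assumes debugfs is mounted at /sys/kernel/debug and that the caller is allowed to read the 0400 attribute, which normally means root.

/* Hypothetical userspace reader for the check_wx_pages attribute. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[16] = "";
	FILE *f = fopen("/sys/kernel/debug/check_wx_pages", "r");

	if (!f) {
		perror("check_wx_pages");
		return 2;
	}
	if (!fgets(buf, sizeof(buf), f))
		buf[0] = '\0';
	fclose(f);

	/* The kernel reports "SUCCESS" or "FAILED"; details go to dmesg. */
	printf("W^X check: %s", buf);
	return strncmp(buf, "SUCCESS", 7) == 0 ? 0 : 1;
}
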
// SPDX-License-Identifier: GPL-2.0

#include <linux/pagewalk.h>
#include <linux/debugfs.h>
#include <linux/ptdump.h>
#include <linux/kasan.h>

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
/*
 * This is an optimization for KASAN=y case. Since all kasan page tables
 * eventually point to the kasan_early_shadow_page we could call note_page()
 * right away without walking through lower level page tables. This saves
 * us dozens of seconds (minutes for 5-level config) while checking for
 * W+X mapping or reading kernel_page_tables debugfs file.
 */
static inline int note_kasan_page_table(struct mm_walk *walk,
					unsigned long addr)
{
	struct ptdump_state *st = walk->private;

	st->note_page(st, addr, 4, pte_val(kasan_early_shadow_pte[0]));

	walk->action = ACTION_CONTINUE;

	return 0;
}
#endif

/*
 * Levels passed to note_page()/effective_prot() below:
 * 0 = PGD, 1 = P4D, 2 = PUD, 3 = PMD, 4 = PTE.
 */
static int ptdump_pgd_entry(pgd_t *pgd, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pgd_t val = READ_ONCE(*pgd);

#if CONFIG_PGTABLE_LEVELS > 4 && \
		(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
	if (pgd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_p4d)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot)
		st->effective_prot(st, 0, pgd_val(val));

	if (pgd_leaf(val)) {
		st->note_page(st, addr, 0, pgd_val(val));
		walk->action = ACTION_CONTINUE;
	}

	return 0;
}

static int ptdump_p4d_entry(p4d_t *p4d, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	p4d_t val = READ_ONCE(*p4d);

#if CONFIG_PGTABLE_LEVELS > 3 && \
		(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
	if (p4d_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pud)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot)
		st->effective_prot(st, 1, p4d_val(val));

	if (p4d_leaf(val)) {
		st->note_page(st, addr, 1, p4d_val(val));
		walk->action = ACTION_CONTINUE;
	}

	return 0;
}

static int ptdump_pud_entry(pud_t *pud, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pud_t val = READ_ONCE(*pud);

#if CONFIG_PGTABLE_LEVELS > 2 && \
		(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
	if (pud_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pmd)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot)
		st->effective_prot(st, 2, pud_val(val));

	if (pud_leaf(val)) {
		st->note_page(st, addr, 2, pud_val(val));
		walk->action = ACTION_CONTINUE;
	}

	return 0;
}

static int ptdump_pmd_entry(pmd_t *pmd, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pmd_t val = READ_ONCE(*pmd);

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	if (pmd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pte)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot)
		st->effective_prot(st, 3, pmd_val(val));
	if (pmd_leaf(val)) {
		st->note_page(st, addr, 3, pmd_val(val));
		walk->action = ACTION_CONTINUE;
	}

	return 0;
}

static int ptdump_pte_entry(pte_t *pte, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pte_t val = ptep_get_lockless(pte);

	if (st->effective_prot)
		st->effective_prot(st, 4, pte_val(val));

	st->note_page(st, addr, 4, pte_val(val));

	return 0;
}

static int ptdump_hole(unsigned long addr, unsigned long next,
		       int depth, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;

	st->note_page(st, addr, depth, 0);

	return 0;
}

static const struct mm_walk_ops ptdump_ops = {
	.pgd_entry	= ptdump_pgd_entry,
	.p4d_entry	= ptdump_p4d_entry,
	.pud_entry	= ptdump_pud_entry,
	.pmd_entry	= ptdump_pmd_entry,
	.pte_entry	= ptdump_pte_entry,
	.pte_hole	= ptdump_hole,
};

void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd)
{
	const struct ptdump_range *range = st->range;

	mmap_write_lock(mm);
	while (range->start != range->end) {
		walk_page_range_novma(mm, range->start, range->end,
				      &ptdump_ops, pgd, st);
		range++;
	}
	mmap_write_unlock(mm);

	/* Flush out the last page */
	st->note_page(st, 0, -1, 0);
}

/*
 * Reading /sys/kernel/debug/check_wx_pages triggers the architecture's
 * ptdump_check_wx() and reports SUCCESS or FAILED; the detailed result
 * is printed to the kernel log.
 */
static int check_wx_show(struct seq_file *m, void *v)
{
	if (ptdump_check_wx())
		seq_puts(m, "SUCCESS\n");
	else
		seq_puts(m, "FAILED\n");

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(check_wx);

static int ptdump_debugfs_init(void)
{
	debugfs_create_file("check_wx_pages", 0400, NULL, NULL, &check_wx_fops);

	return 0;
}

device_initcall(ptdump_debugfs_init);
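
For context on how the walker above gets used: ptdump_check_wx() itself lives in architecture code, not in this file. A rough, hypothetical sketch of the pattern (names such as wx_state, wx_note_page, pte_is_wx and kernel_ptdump_ranges are illustrative only, not taken from any real architecture):

/*
 * Hypothetical arch-side sketch: count writable+executable mappings by
 * plugging a counting note_page() callback into ptdump_walk_pgd().
 * wx_state, wx_note_page, pte_is_wx and kernel_ptdump_ranges are
 * illustrative names, not real kernel symbols.
 */
#include <linux/ptdump.h>
#include <linux/mm.h>

struct wx_state {
	struct ptdump_state ptdump;	/* callbacks receive a pointer to this */
	unsigned long wx_pages;
};

static void wx_note_page(struct ptdump_state *st, unsigned long addr,
			 int level, u64 val)
{
	struct wx_state *wx = container_of(st, struct wx_state, ptdump);

	/* pte_is_wx() stands in for the arch's write+exec test on val. */
	if (val && pte_is_wx(val))
		wx->wx_pages++;
}

bool ptdump_check_wx(void)
{
	struct wx_state wx = {
		.ptdump = {
			.note_page	= wx_note_page,
			.range		= kernel_ptdump_ranges,	/* arch-defined */
		},
		.wx_pages = 0,
	};

	ptdump_walk_pgd(&wx.ptdump, &init_mm, NULL);

	if (wx.wx_pages) {
		pr_warn("checked W+X mappings: FAILED, %lu W+X pages found\n",
			wx.wx_pages);
		return false;
	}

	pr_info("checked W+X mappings: passed, no W+X pages found\n");
	return true;
}

The SUCCESS/FAILED string emitted by check_wx_show() above then simply reflects this boolean, while the per-architecture walk prints the details to the kernel log.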