mirror of
https://github.com/torvalds/linux.git
synced 2024-11-22 20:22:09 +00:00
lkdtm/powerpc: Add SLB multihit test
To check machine check handling, add support to inject slb multihit errors. Co-developed-by: Mahesh Salgaonkar <mahesh@linux.ibm.com> Signed-off-by: Mahesh Salgaonkar <mahesh@linux.ibm.com> Signed-off-by: Ganesh Goudar <ganeshgr@linux.ibm.com> [mpe: Use CONFIG_PPC_BOOK3S_64 to fix compile errors reported by lkp@intel.com] Signed-off-by: Michael Ellerman <mpe@ellerman.id.au> Link: https://lore.kernel.org/r/20201130083057.135610-1-ganeshgr@linux.ibm.com
This commit is contained in:
parent
6c58b1b41b
commit
3ba150fb21
@ -843,6 +843,32 @@ static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
|
||||
|
||||
unsigned htab_shift_for_mem_size(unsigned long mem_size);
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
/*
 * Indexes of the bolted kernel SLB entries.  The index value is also
 * OR-ed into the ESID word by mk_esid_data().
 */
enum slb_index {
	LINEAR_INDEX = 0, /* Kernel linear map (0xc000000000000000) */
	KSTACK_INDEX = 1, /* Kernel stack map */
};

/*
 * Mask selecting the ESID (effective segment ID) bits of an effective
 * address for the given segment size: 256M segments use ESID_MASK,
 * 1T segments use ESID_MASK_1T.
 */
#define slb_esid_mask(ssize)	\
	(((ssize) == MMU_SEGSIZE_256M) ? ESID_MASK : ESID_MASK_1T)
|
||||
|
||||
static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
|
||||
enum slb_index index)
|
||||
{
|
||||
return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
|
||||
}
|
||||
|
||||
static inline unsigned long __mk_vsid_data(unsigned long vsid, int ssize,
|
||||
unsigned long flags)
|
||||
{
|
||||
return (vsid << slb_vsid_shift(ssize)) | flags |
|
||||
((unsigned long)ssize << SLB_VSID_SSIZE_SHIFT);
|
||||
}
|
||||
|
||||
/*
 * Build the VSID word of an SLB entry for a kernel effective address,
 * deriving the VSID from the address itself.
 */
static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
					 unsigned long flags)
{
	unsigned long vsid = get_kernel_vsid(ea, ssize);

	return __mk_vsid_data(vsid, ssize, flags);
}
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
#endif /* _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_ */
|
||||
|
@ -112,6 +112,7 @@ int mmu_linear_psize = MMU_PAGE_4K;
|
||||
EXPORT_SYMBOL_GPL(mmu_linear_psize);
|
||||
int mmu_virtual_psize = MMU_PAGE_4K;
|
||||
int mmu_vmalloc_psize = MMU_PAGE_4K;
|
||||
EXPORT_SYMBOL_GPL(mmu_vmalloc_psize);
|
||||
#ifdef CONFIG_SPARSEMEM_VMEMMAP
|
||||
int mmu_vmemmap_psize = MMU_PAGE_4K;
|
||||
#endif
|
||||
|
@ -28,35 +28,8 @@
|
||||
#include "internal.h"
|
||||
|
||||
|
||||
/* Indexes of the bolted kernel SLB entries. */
enum slb_index {
	LINEAR_INDEX = 0, /* Kernel linear map (0xc000000000000000) */
	KSTACK_INDEX = 1, /* Kernel stack map */
};

/* Forward declaration: fault-time allocation of a user SLB entry. */
static long slb_allocate_user(struct mm_struct *mm, unsigned long ea);

/* Select the ESID mask matching the segment size (256M vs 1T). */
#define slb_esid_mask(ssize)	\
	(((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)
|
||||
|
||||
/*
 * Build the ESID (high) word of an SLB entry: masked effective address,
 * valid bit, and entry index.
 */
static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
					 enum slb_index index)
{
	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
}

/*
 * Build the VSID (low) word of an SLB entry from an already-computed
 * VSID, the segment-size field, and the given flags.
 */
static inline unsigned long __mk_vsid_data(unsigned long vsid, int ssize,
					   unsigned long flags)
{
	return (vsid << slb_vsid_shift(ssize)) | flags |
		((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
}

/*
 * Build the VSID word of an SLB entry for a kernel effective address,
 * deriving the VSID from the address.
 */
static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
					 unsigned long flags)
{
	return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
}
|
||||
|
||||
bool stress_slb_enabled __initdata;
|
||||
|
||||
static int __init parse_stress_slb(char *p)
|
||||
|
@ -10,6 +10,7 @@ lkdtm-$(CONFIG_LKDTM) += rodata_objcopy.o
|
||||
lkdtm-$(CONFIG_LKDTM) += usercopy.o
|
||||
lkdtm-$(CONFIG_LKDTM) += stackleak.o
|
||||
lkdtm-$(CONFIG_LKDTM) += cfi.o
|
||||
lkdtm-$(CONFIG_PPC_BOOK3S_64) += powerpc.o
|
||||
|
||||
KASAN_SANITIZE_stackleak.o := n
|
||||
KCOV_INSTRUMENT_rodata.o := n
|
||||
|
@ -176,6 +176,9 @@ static const struct crashtype crashtypes[] = {
|
||||
#ifdef CONFIG_X86_32
|
||||
CRASHTYPE(DOUBLE_FAULT),
|
||||
#endif
|
||||
#ifdef CONFIG_PPC_BOOK3S_64
|
||||
CRASHTYPE(PPC_SLB_MULTIHIT),
|
||||
#endif
|
||||
};
|
||||
|
||||
|
||||
|
@ -102,4 +102,7 @@ void lkdtm_STACKLEAK_ERASING(void);
|
||||
/* cfi.c */
|
||||
void lkdtm_CFI_FORWARD_PROTO(void);
|
||||
|
||||
/* powerpc.c */
|
||||
void lkdtm_PPC_SLB_MULTIHIT(void);
|
||||
|
||||
#endif
|
||||
|
120
drivers/misc/lkdtm/powerpc.c
Normal file
120
drivers/misc/lkdtm/powerpc.c
Normal file
@ -0,0 +1,120 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
#include "lkdtm.h"
|
||||
#include <linux/slab.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <asm/mmu.h>
|
||||
|
||||
/*
 * Insert two SLB entries for effective address @p, at indexes
 * SLB_NUM_BOLTED and SLB_NUM_BOLTED + 1.  Both entries carry the same
 * ESID, which is exactly the "multihit" condition the machine check
 * handler is expected to detect and recover from.
 *
 * @p:         effective address to map
 * @ssize:     segment size (MMU_SEGSIZE_256M or MMU_SEGSIZE_1T)
 * @page_size: index into mmu_psize_defs[] supplying the page-size bits
 *
 * Preemption is disabled across both slbmte's so that they hit the
 * same CPU's SLB.
 */
static void insert_slb_entry(unsigned long p, int ssize, int page_size)
{
	unsigned long flags;

	flags = SLB_VSID_KERNEL | mmu_psize_defs[page_size].sllp;
	preempt_disable();

	/* First entry at index SLB_NUM_BOLTED. */
	asm volatile("slbmte %0,%1" :
		     : "r" (mk_vsid_data(p, ssize, flags)),
		       "r" (mk_esid_data(p, ssize, SLB_NUM_BOLTED))
		     : "memory");

	/* Duplicate entry (same ESID) at the next index: the multihit. */
	asm volatile("slbmte %0,%1" :
		     : "r" (mk_vsid_data(p, ssize, flags)),
		       "r" (mk_esid_data(p, ssize, SLB_NUM_BOLTED + 1))
		     : "memory");
	preempt_enable();
}
|
||||
|
||||
/*
 * Inject an SLB multihit on a vmalloc-ed address (i.e. 0xD00...).
 *
 * Returns 0 on success, -ENOMEM if the vmalloc allocation fails (in
 * which case no multihit was injected).
 */
static int inject_vmalloc_slb_multihit(void)
{
	char *p;

	p = vmalloc(PAGE_SIZE);
	if (!p)
		return -ENOMEM;

	insert_slb_entry((unsigned long)p, MMU_SEGSIZE_1T, mmu_vmalloc_psize);
	/*
	 * This write triggers the exception; if it is handled correctly
	 * we must recover from the error and reach the vfree() below.
	 */
	p[0] = '!';
	vfree(p);
	return 0;
}
|
||||
|
||||
/*
 * Inject an SLB multihit on a kmalloc-ed address, i.e. in the kernel
 * linear mapping (0xC00...).
 *
 * Returns 0 on success, -ENOMEM if the kmalloc allocation fails (in
 * which case no multihit was injected).
 */
static int inject_kmalloc_slb_multihit(void)
{
	char *p;

	p = kmalloc(2048, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	insert_slb_entry((unsigned long)p, MMU_SEGSIZE_1T, mmu_linear_psize);
	/*
	 * This write triggers the exception; if it is handled correctly
	 * we must recover from the error and reach the kfree() below.
	 */
	p[0] = '!';
	kfree(p);
	return 0;
}
|
||||
|
||||
/*
 * A few initial SLB entries are bolted.  This test injects a multihit
 * on bolted entry 0 by reading it back and re-inserting it (twice) at
 * non-bolted indexes, then touching an address it maps.
 */
static void insert_dup_slb_entry_0(void)
{
	unsigned long test_address = PAGE_OFFSET, *test_ptr;
	unsigned long esid, vsid;
	unsigned long i = 0;

	test_ptr = (unsigned long *)test_address;
	preempt_disable();

	/* Read back bolted entry 0's ESID and VSID words. */
	asm volatile("slbmfee %0,%1" : "=r" (esid) : "r" (i));
	asm volatile("slbmfev %0,%1" : "=r" (vsid) : "r" (i));

	/* for i !=0 we would need to mask out the old entry number */
	asm volatile("slbmte %0,%1" :
		     : "r" (vsid),
		       "r" (esid | SLB_NUM_BOLTED)
		     : "memory");

	/* Re-read entry 0 and insert a second duplicate. */
	asm volatile("slbmfee %0,%1" : "=r" (esid) : "r" (i));
	asm volatile("slbmfev %0,%1" : "=r" (vsid) : "r" (i));

	/* for i !=0 we would need to mask out the old entry number */
	asm volatile("slbmte %0,%1" :
		     : "r" (vsid),
		       "r" (esid | (SLB_NUM_BOLTED + 1))
		     : "memory");

	/* Dereference the duplicated mapping to trigger the multihit. */
	pr_info("%s accessing test address 0x%lx: 0x%lx\n",
		 __func__, test_address, *test_ptr);

	preempt_enable();
}
|
||||
|
||||
/*
 * lkdtm_PPC_SLB_MULTIHIT() - entry point for the PPC_SLB_MULTIHIT
 * crashtype.
 *
 * Only meaningful with the hash MMU; on radix the SLB is not in use,
 * so the test is reported as an expected failure (XFAIL).
 *
 * Fix vs. original: the -ENOMEM returns of the inject_*() helpers were
 * ignored, so "Recovered" was printed even when allocation failed and
 * no multihit had actually been injected.  Now a failed injection is
 * reported and the remaining steps are skipped.
 */
void lkdtm_PPC_SLB_MULTIHIT(void)
{
	if (!radix_enabled()) {
		pr_info("Injecting SLB multihit errors\n");
		/*
		 * These need not be separate tests, And they do pretty
		 * much same thing. In any case we must recover from the
		 * errors introduced by these functions, machine would not
		 * survive these tests in case of failure to handle.
		 */
		if (inject_vmalloc_slb_multihit() ||
		    inject_kmalloc_slb_multihit()) {
			/* Allocation failed: nothing was injected. */
			pr_err("Could not allocate memory, test skipped\n");
			return;
		}
		insert_dup_slb_entry_0();
		pr_info("Recovered from SLB multihit errors\n");
	} else {
		pr_err("XFAIL: This test is for ppc64 and with hash mode MMU only\n");
	}
}
|
@ -68,3 +68,4 @@ USERCOPY_STACK_BEYOND
|
||||
USERCOPY_KERNEL
|
||||
STACKLEAK_ERASING OK: the rest of the thread stack is properly erased
|
||||
CFI_FORWARD_PROTO
|
||||
PPC_SLB_MULTIHIT Recovered
|
||||
|
Loading…
Reference in New Issue
Block a user