From 23c216b335d1fbd716076e8263b54a714ea3cf0e Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Thu, 30 Sep 2021 13:44:54 +1000 Subject: [PATCH 01/19] powerpc/iommu: Report the correct most efficient DMA mask for PCI devices According to dma-api.rst, the dma_get_required_mask() helper should return "the mask that the platform requires to operate efficiently". Which in the case of PPC64 means the bypass mask and not a mask from an IOMMU table which is shorter and slower to use due to map/unmap operations (especially expensive on "pseries"). However the existing implementation ignores the possibility of bypassing and returns the IOMMU table mask on the pseries platform which makes some drivers (mpt3sas is one example) choose 32bit DMA even though bypass is supported. The powernv platform sort of handles it by having a bigger default window with a mask >=40 but it only works as drivers choose 63/64bit if the required mask is >32 which is rather pointless. This reintroduces the bypass capability check to let drivers make a better choice of the DMA mask. Fixes: f1565c24b596 ("powerpc: use the generic dma_ops_bypass mode") Signed-off-by: Alexey Kardashevskiy Reviewed-by: Christoph Hellwig Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20210930034454.95794-1-aik@ozlabs.ru --- arch/powerpc/kernel/dma-iommu.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c index 111249fd619d..038ce8d9061d 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c @@ -184,6 +184,15 @@ u64 dma_iommu_get_required_mask(struct device *dev) struct iommu_table *tbl = get_iommu_table_base(dev); u64 mask; + if (dev_is_pci(dev)) { + u64 bypass_mask = dma_direct_get_required_mask(dev); + + if (dma_iommu_dma_supported(dev, bypass_mask)) { + dev_info(dev, "%s: returning bypass mask 0x%llx\n", __func__, bypass_mask); + return bypass_mask; + } + } + if (!tbl) return 0; From 4549c3ea3160fa8b3f37dfe2f957657bb265eda9 Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Wed, 6 Oct 2021 01:55:20 +0530 Subject: [PATCH 02/19] powerpc/lib: Add helper to check if offset is within conditional branch range Add a helper to check if a given offset is within the branch range for a powerpc conditional branch instruction, and update some sites to use the new helper. Signed-off-by: Naveen N. 
Rao Reviewed-by: Christophe Leroy Acked-by: Song Liu Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/442b69a34ced32ca346a0d9a855f3f6cfdbbbd41.1633464148.git.naveen.n.rao@linux.vnet.ibm.com --- arch/powerpc/include/asm/code-patching.h | 1 + arch/powerpc/lib/code-patching.c | 7 ++++++- arch/powerpc/net/bpf_jit.h | 7 +------ 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h index a95f63788c6b..4ba834599c4d 100644 --- a/arch/powerpc/include/asm/code-patching.h +++ b/arch/powerpc/include/asm/code-patching.h @@ -23,6 +23,7 @@ #define BRANCH_ABSOLUTE 0x2 bool is_offset_in_branch_range(long offset); +bool is_offset_in_cond_branch_range(long offset); int create_branch(struct ppc_inst *instr, const u32 *addr, unsigned long target, int flags); int create_cond_branch(struct ppc_inst *instr, const u32 *addr, diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index f9a3019e37b4..c5ed98823835 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -228,6 +228,11 @@ bool is_offset_in_branch_range(long offset) return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3)); } +bool is_offset_in_cond_branch_range(long offset) +{ + return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3); +} + /* * Helper to check if a given instruction is a conditional branch * Derived from the conditional checks in analyse_instr() @@ -280,7 +285,7 @@ int create_cond_branch(struct ppc_inst *instr, const u32 *addr, offset = offset - (unsigned long)addr; /* Check we can represent the target in the instruction format */ - if (offset < -0x8000 || offset > 0x7FFF || offset & 0x3) + if (!is_offset_in_cond_branch_range(offset)) return 1; /* Mask out the flags and target, so they don't step on each other. */ diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h index 99fad093f43e..935ea95b6635 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h @@ -78,11 +78,6 @@ #define PPC_FUNC_ADDR(d,i) do { PPC_LI32(d, i); } while(0) #endif -static inline bool is_nearbranch(int offset) -{ - return (offset < 32768) && (offset >= -32768); -} - /* * The fly in the ointment of code size changing from pass to pass is * avoided by padding the short branch case with a NOP. If code size differs @@ -91,7 +86,7 @@ static inline bool is_nearbranch(int offset) * state. */ #define PPC_BCC(cond, dest) do { \ - if (is_nearbranch((dest) - (ctx->idx * 4))) { \ + if (is_offset_in_cond_branch_range((long)(dest) - (ctx->idx * 4))) { \ PPC_BCC_SHORT(cond, dest); \ EMIT(PPC_RAW_NOP()); \ } else { \ From 3832ba4e283d7052b783dab8311df7e3590fed93 Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Wed, 6 Oct 2021 01:55:21 +0530 Subject: [PATCH 03/19] powerpc/bpf: Validate branch ranges Add checks to ensure that we never emit branch instructions with truncated branch offsets. Suggested-by: Michael Ellerman Signed-off-by: Naveen N. 
Rao Tested-by: Johan Almbladh Reviewed-by: Christophe Leroy Acked-by: Song Liu Acked-by: Johan Almbladh Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/71d33a6b7603ec1013c9734dd8bdd4ff5e929142.1633464148.git.naveen.n.rao@linux.vnet.ibm.com --- arch/powerpc/net/bpf_jit.h | 26 ++++++++++++++++++++------ arch/powerpc/net/bpf_jit_comp.c | 6 +++++- arch/powerpc/net/bpf_jit_comp32.c | 8 ++++++-- arch/powerpc/net/bpf_jit_comp64.c | 8 ++++++-- 4 files changed, 37 insertions(+), 11 deletions(-) diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h index 935ea95b6635..7e9b978b768e 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h @@ -24,16 +24,30 @@ #define EMIT(instr) PLANT_INSTR(image, ctx->idx, instr) /* Long jump; (unconditional 'branch') */ -#define PPC_JMP(dest) EMIT(PPC_INST_BRANCH | \ - (((dest) - (ctx->idx * 4)) & 0x03fffffc)) +#define PPC_JMP(dest) \ + do { \ + long offset = (long)(dest) - (ctx->idx * 4); \ + if (!is_offset_in_branch_range(offset)) { \ + pr_err_ratelimited("Branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \ + return -ERANGE; \ + } \ + EMIT(PPC_INST_BRANCH | (offset & 0x03fffffc)); \ + } while (0) + /* blr; (unconditional 'branch' with link) to absolute address */ #define PPC_BL_ABS(dest) EMIT(PPC_INST_BL | \ (((dest) - (unsigned long)(image + ctx->idx)) & 0x03fffffc)) /* "cond" here covers BO:BI fields. */ -#define PPC_BCC_SHORT(cond, dest) EMIT(PPC_INST_BRANCH_COND | \ - (((cond) & 0x3ff) << 16) | \ - (((dest) - (ctx->idx * 4)) & \ - 0xfffc)) +#define PPC_BCC_SHORT(cond, dest) \ + do { \ + long offset = (long)(dest) - (ctx->idx * 4); \ + if (!is_offset_in_cond_branch_range(offset)) { \ + pr_err_ratelimited("Conditional branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \ + return -ERANGE; \ + } \ + EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc)); \ + } while (0) + /* Sign-extended 32-bit immediate load */ #define PPC_LI32(d, i) do { \ if ((int)(uintptr_t)(i) >= -32768 && \ diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index 53aefee3fe70..fcbf7a917c56 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -210,7 +210,11 @@ skip_init_ctx: /* Now build the prologue, body code & epilogue for real. 
*/ cgctx.idx = 0; bpf_jit_build_prologue(code_base, &cgctx); - bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass); + if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass)) { + bpf_jit_binary_free(bpf_hdr); + fp = org_fp; + goto out_addrs; + } bpf_jit_build_epilogue(code_base, &cgctx); if (bpf_jit_enable > 1) diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c index beb12cbc8c29..a74d52204f8d 100644 --- a/arch/powerpc/net/bpf_jit_comp32.c +++ b/arch/powerpc/net/bpf_jit_comp32.c @@ -200,7 +200,7 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun } } -static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out) +static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out) { /* * By now, the eBPF program has already setup parameters in r3-r6 @@ -261,7 +261,9 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 bpf_jit_emit_common_epilogue(image, ctx); EMIT(PPC_RAW_BCTR()); + /* out: */ + return 0; } /* Assemble the body code between the prologue & epilogue */ @@ -1090,7 +1092,9 @@ cond_branch: */ case BPF_JMP | BPF_TAIL_CALL: ctx->seen |= SEEN_TAILCALL; - bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]); + ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]); + if (ret < 0) + return ret; break; default: diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index b87a63dba9c8..f06c62089b14 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -206,7 +206,7 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun EMIT(PPC_RAW_BCTRL()); } -static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out) +static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out) { /* * By now, the eBPF program has already setup parameters in r3, r4 and r5 @@ -267,7 +267,9 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 bpf_jit_emit_common_epilogue(image, ctx); EMIT(PPC_RAW_BCTR()); + /* out: */ + return 0; } /* Assemble the body code between the prologue & epilogue */ @@ -993,7 +995,9 @@ cond_branch: */ case BPF_JMP | BPF_TAIL_CALL: ctx->seen |= SEEN_TAILCALL; - bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]); + ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]); + if (ret < 0) + return ret; break; default: From 8bbc9d822421d9ac8ff9ed26a3713c9afc69d6c8 Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Wed, 6 Oct 2021 01:55:22 +0530 Subject: [PATCH 04/19] powerpc/bpf: Fix BPF_MOD when imm == 1 Only ignore the operation if dividing by 1. Fixes: 156d0e290e969c ("powerpc/ebpf/jit: Implement JIT compiler for extended BPF") Signed-off-by: Naveen N. 
Rao Tested-by: Johan Almbladh Reviewed-by: Christophe Leroy Acked-by: Song Liu Acked-by: Johan Almbladh Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/c674ca18c3046885602caebb326213731c675d06.1633464148.git.naveen.n.rao@linux.vnet.ibm.com --- arch/powerpc/net/bpf_jit_comp64.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index f06c62089b14..d67f6d62e2e1 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -391,8 +391,14 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context * case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */ if (imm == 0) return -EINVAL; - else if (imm == 1) - goto bpf_alu32_trunc; + if (imm == 1) { + if (BPF_OP(code) == BPF_DIV) { + goto bpf_alu32_trunc; + } else { + EMIT(PPC_RAW_LI(dst_reg, 0)); + break; + } + } PPC_LI32(b2p[TMP_REG_1], imm); switch (BPF_CLASS(code)) { From 5855c4c1f415ca3ba1046e77c0b3d3dfc96c9025 Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Wed, 6 Oct 2021 01:55:23 +0530 Subject: [PATCH 05/19] powerpc/bpf: Fix BPF_SUB when imm == 0x80000000 We aren't handling subtraction involving an immediate value of 0x80000000 properly. Fix the same. Fixes: 156d0e290e969c ("powerpc/ebpf/jit: Implement JIT compiler for extended BPF") Signed-off-by: Naveen N. Rao Reviewed-by: Christophe Leroy [mpe: Fold in fix from Naveen to use imm <= 32768] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/fc4b1276eb10761fd7ce0814c8dd089da2815251.1633464148.git.naveen.n.rao@linux.vnet.ibm.com --- arch/powerpc/net/bpf_jit_comp64.c | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index d67f6d62e2e1..2ea1c3f6e287 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -330,18 +330,25 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context * EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg)); goto bpf_alu32_trunc; case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */ - case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */ case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */ + if (!imm) { + goto bpf_alu32_trunc; + } else if (imm >= -32768 && imm < 32768) { + EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm))); + } else { + PPC_LI32(b2p[TMP_REG_1], imm); + EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1])); + } + goto bpf_alu32_trunc; + case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */ case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */ - if (BPF_OP(code) == BPF_SUB) - imm = -imm; - if (imm) { - if (imm >= -32768 && imm < 32768) - EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm))); - else { - PPC_LI32(b2p[TMP_REG_1], imm); - EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1])); - } + if (!imm) { + goto bpf_alu32_trunc; + } else if (imm > -32768 && imm <= 32768) { + EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm))); + } else { + PPC_LI32(b2p[TMP_REG_1], imm); + EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1])); } goto bpf_alu32_trunc; case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */ From 030905920f32e91a52794937f67434ac0b3ea41a Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Wed, 6 Oct 2021 01:55:24 +0530 Subject: [PATCH 06/19] powerpc/security: Add a helper to query stf_barrier type Add a helper to return the stf_barrier type for the current processor. Signed-off-by: Naveen N. 
Rao Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/3bd5d7f96ea1547991ac2ce3137dc2b220bae285.1633464148.git.naveen.n.rao@linux.vnet.ibm.com --- arch/powerpc/include/asm/security_features.h | 5 +++++ arch/powerpc/kernel/security.c | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h index 792eefaf230b..27574f218b37 100644 --- a/arch/powerpc/include/asm/security_features.h +++ b/arch/powerpc/include/asm/security_features.h @@ -39,6 +39,11 @@ static inline bool security_ftr_enabled(u64 feature) return !!(powerpc_security_features & feature); } +#ifdef CONFIG_PPC_BOOK3S_64 +enum stf_barrier_type stf_barrier_type_get(void); +#else +static inline enum stf_barrier_type stf_barrier_type_get(void) { return STF_BARRIER_NONE; } +#endif // Features indicating support for Spectre/Meltdown mitigations diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index 1a998490fe60..15fb5ea1b9ea 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -263,6 +263,11 @@ static int __init handle_no_stf_barrier(char *p) early_param("no_stf_barrier", handle_no_stf_barrier); +enum stf_barrier_type stf_barrier_type_get(void) +{ + return stf_enabled_flush_types; +} + /* This is the generic flag used by other architectures */ static int __init handle_ssbd(char *p) { From b7540d62509453263604a155bf2d5f0ed450cba2 Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Wed, 6 Oct 2021 01:55:25 +0530 Subject: [PATCH 07/19] powerpc/bpf: Emit stf barrier instruction sequences for BPF_NOSPEC Emit similar instruction sequences to commit a048a07d7f4535 ("powerpc/64s: Add support for a store forwarding barrier at kernel entry/exit") when encountering BPF_NOSPEC. Mitigations are enabled depending on what the firmware advertises. In particular, we do not gate these mitigations based on current settings, just like in x86. Due to this, we don't need to take any action if mitigations are enabled or disabled at runtime. Signed-off-by: Naveen N. Rao Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/956570cbc191cd41f8274bed48ee757a86dac62a.1633464148.git.naveen.n.rao@linux.vnet.ibm.com --- arch/powerpc/net/bpf_jit64.h | 8 ++--- arch/powerpc/net/bpf_jit_comp64.c | 55 ++++++++++++++++++++++++++++--- 2 files changed, 55 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/net/bpf_jit64.h b/arch/powerpc/net/bpf_jit64.h index 7b713edfa7e2..b63b35e45e55 100644 --- a/arch/powerpc/net/bpf_jit64.h +++ b/arch/powerpc/net/bpf_jit64.h @@ -16,18 +16,18 @@ * with our redzone usage. 
* * [ prev sp ] <------------- - * [ nv gpr save area ] 6*8 | + * [ nv gpr save area ] 5*8 | * [ tail_call_cnt ] 8 | - * [ local_tmp_var ] 8 | + * [ local_tmp_var ] 16 | * fp (r31) --> [ ebpf stack space ] upto 512 | * [ frame header ] 32/112 | * sp (r1) ---> [ stack pointer ] -------------- */ /* for gpr non volatile registers BPG_REG_6 to 10 */ -#define BPF_PPC_STACK_SAVE (6*8) +#define BPF_PPC_STACK_SAVE (5*8) /* for bpf JIT code internal usage */ -#define BPF_PPC_STACK_LOCALS 16 +#define BPF_PPC_STACK_LOCALS 24 /* stack frame excluding BPF stack, ensure this is quadword aligned */ #define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + \ BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE) diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index 2ea1c3f6e287..8b5157ccfeba 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "bpf_jit64.h" @@ -35,9 +36,9 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx) * [ prev sp ] <------------- * [ ... ] | * sp (r1) ---> [ stack pointer ] -------------- - * [ nv gpr save area ] 6*8 + * [ nv gpr save area ] 5*8 * [ tail_call_cnt ] 8 - * [ local_tmp_var ] 8 + * [ local_tmp_var ] 16 * [ unused red zone ] 208 bytes protected */ static int bpf_jit_stack_local(struct codegen_context *ctx) @@ -45,12 +46,12 @@ static int bpf_jit_stack_local(struct codegen_context *ctx) if (bpf_has_stack_frame(ctx)) return STACK_FRAME_MIN_SIZE + ctx->stack_size; else - return -(BPF_PPC_STACK_SAVE + 16); + return -(BPF_PPC_STACK_SAVE + 24); } static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx) { - return bpf_jit_stack_local(ctx) + 8; + return bpf_jit_stack_local(ctx) + 16; } static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg) @@ -272,10 +273,33 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o return 0; } +/* + * We spill into the redzone always, even if the bpf program has its own stackframe. + * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local() + */ +void bpf_stf_barrier(void); + +asm ( +" .global bpf_stf_barrier ;" +" bpf_stf_barrier: ;" +" std 21,-64(1) ;" +" std 22,-56(1) ;" +" sync ;" +" ld 21,-64(1) ;" +" ld 22,-56(1) ;" +" ori 31,31,0 ;" +" .rept 14 ;" +" b 1f ;" +" 1: ;" +" .endr ;" +" blr ;" +); + /* Assemble the body code between the prologue & epilogue */ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx, u32 *addrs, bool extra_pass) { + enum stf_barrier_type stf_barrier = stf_barrier_type_get(); const struct bpf_insn *insn = fp->insnsi; int flen = fp->len; int i, ret; @@ -646,6 +670,29 @@ emit_clear: * BPF_ST NOSPEC (speculation barrier) */ case BPF_ST | BPF_NOSPEC: + if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) || + !security_ftr_enabled(SEC_FTR_STF_BARRIER)) + break; + + switch (stf_barrier) { + case STF_BARRIER_EIEIO: + EMIT(PPC_RAW_EIEIO() | 0x02000000); + break; + case STF_BARRIER_SYNC_ORI: + EMIT(PPC_RAW_SYNC()); + EMIT(PPC_RAW_LD(b2p[TMP_REG_1], _R13, 0)); + EMIT(PPC_RAW_ORI(_R31, _R31, 0)); + break; + case STF_BARRIER_FALLBACK: + EMIT(PPC_RAW_MFLR(b2p[TMP_REG_1])); + PPC_LI64(12, dereference_kernel_function_descriptor(bpf_stf_barrier)); + EMIT(PPC_RAW_MTCTR(12)); + EMIT(PPC_RAW_BCTRL()); + EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1])); + break; + case STF_BARRIER_NONE: + break; + } break; /* From c9b8da77f22d28348d1f89a6c4d3fec102e9b1c4 Mon Sep 17 00:00:00 2001 From: "Naveen N. 
Rao" Date: Wed, 6 Oct 2021 01:55:26 +0530 Subject: [PATCH 08/19] powerpc/bpf ppc32: Fix ALU32 BPF_ARSH operation Correct the destination register used for ALU32 BPF_ARSH operation. Fixes: 51c66ad849a703 ("powerpc/bpf: Implement extended BPF on PPC32") Signed-off-by: Naveen N. Rao Reviewed-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/6d24c1f9e79b6f61f5135eaf2ea1e8bcd4dac87b.1633464148.git.naveen.n.rao@linux.vnet.ibm.com --- arch/powerpc/net/bpf_jit_comp32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c index a74d52204f8d..519ecb9ab672 100644 --- a/arch/powerpc/net/bpf_jit_comp32.c +++ b/arch/powerpc/net/bpf_jit_comp32.c @@ -625,7 +625,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context * EMIT(PPC_RAW_LI(dst_reg_h, 0)); break; case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */ - EMIT(PPC_RAW_SRAW(dst_reg_h, dst_reg, src_reg)); + EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg)); break; case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */ bpf_set_seen_register(ctx, tmp_reg); From e8278d44443207bb6609c7b064073f353e6f4978 Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Wed, 6 Oct 2021 01:55:27 +0530 Subject: [PATCH 09/19] powerpc/bpf ppc32: Fix JMP32_JSET_K 'andi' only takes an unsigned 16-bit value. Correct the imm range used when emitting andi. Fixes: 51c66ad849a703 ("powerpc/bpf: Implement extended BPF on PPC32") Signed-off-by: Naveen N. Rao Reviewed-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/b94489f52831305ec15aca4dd04a3527236be7e8.1633464148.git.naveen.n.rao@linux.vnet.ibm.com --- arch/powerpc/net/bpf_jit_comp32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c index 519ecb9ab672..7c65de9ed4fa 100644 --- a/arch/powerpc/net/bpf_jit_comp32.c +++ b/arch/powerpc/net/bpf_jit_comp32.c @@ -1075,7 +1075,7 @@ cond_branch: break; case BPF_JMP32 | BPF_JSET | BPF_K: /* andi does not sign-extend the immediate */ - if (imm >= -32768 && imm < 32768) { + if (imm >= 0 && imm < 32768) { /* PPC_ANDI is _only/always_ dot-form */ EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm)); } else { From 48164fccdff6d5cc11308126c050bd25a329df25 Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Wed, 6 Oct 2021 01:55:28 +0530 Subject: [PATCH 10/19] powerpc/bpf ppc32: Do not emit zero extend instruction for 64-bit BPF_END Suppress emitting zero extend instruction for 64-bit BPF_END_FROM_[L|B]E operation. Fixes: 51c66ad849a703 ("powerpc/bpf: Implement extended BPF on PPC32") Signed-off-by: Naveen N. Rao Reviewed-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/b4e3c3546121315a8e2059b19a1bda84971816e4.1633464148.git.naveen.n.rao@linux.vnet.ibm.com --- arch/powerpc/net/bpf_jit_comp32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c index 7c65de9ed4fa..68dc8a8231de 100644 --- a/arch/powerpc/net/bpf_jit_comp32.c +++ b/arch/powerpc/net/bpf_jit_comp32.c @@ -1107,7 +1107,7 @@ cond_branch: return -EOPNOTSUPP; } if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext && - !insn_is_zext(&insn[i + 1])) + !insn_is_zext(&insn[i + 1]) && !(BPF_OP(code) == BPF_END && imm == 64)) EMIT(PPC_RAW_LI(dst_reg_h, 0)); } From 548b762763b885b81850db676258df47c55dd5f9 Mon Sep 17 00:00:00 2001 From: "Naveen N. 
Rao" Date: Wed, 6 Oct 2021 01:55:29 +0530 Subject: [PATCH 11/19] powerpc/bpf ppc32: Fix BPF_SUB when imm == 0x80000000 Special case handling of the smallest 32-bit negative number for BPF_SUB. Fixes: 51c66ad849a703 ("powerpc/bpf: Implement extended BPF on PPC32") Signed-off-by: Naveen N. Rao Reviewed-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/7135360a0cdf70adedbccf9863128b8daef18764.1633464148.git.naveen.n.rao@linux.vnet.ibm.com --- arch/powerpc/net/bpf_jit_comp32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c index 68dc8a8231de..0da31d41d413 100644 --- a/arch/powerpc/net/bpf_jit_comp32.c +++ b/arch/powerpc/net/bpf_jit_comp32.c @@ -357,7 +357,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context * PPC_LI32(_R0, imm); EMIT(PPC_RAW_ADDC(dst_reg, dst_reg, _R0)); } - if (imm >= 0) + if (imm >= 0 || (BPF_OP(code) == BPF_SUB && imm == 0x80000000)) EMIT(PPC_RAW_ADDZE(dst_reg_h, dst_reg_h)); else EMIT(PPC_RAW_ADDME(dst_reg_h, dst_reg_h)); From 3e607dc4df180b72a38e75030cb0f94d12808712 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 5 Oct 2021 00:56:38 +1000 Subject: [PATCH 12/19] powerpc/64s: fix program check interrupt emergency stack path Emergency stack path was jumping into a 3: label inside the __GEN_COMMON_BODY macro for the normal path after it had finished, rather than jumping over it. By a small miracle this is the correct place to build up a new interrupt frame with the existing stack pointer, so things basically worked okay with an added weird looking 700 trap frame on top (which had the wrong ->nip so it didn't decode bug messages either). Fix this by avoiding using numeric labels when jumping over non-trivial macros. 
Before: LE PAGE_SIZE=64K MMU=Radix SMP NR_CPUS=2048 NUMA PowerNV Modules linked in: CPU: 0 PID: 88 Comm: sh Not tainted 5.15.0-rc2-00034-ge057cdade6e5 #2637 NIP: 7265677368657265 LR: c00000000006c0c8 CTR: c0000000000097f0 REGS: c0000000fffb3a50 TRAP: 0700 Not tainted MSR: 9000000000021031 CR: 00000700 XER: 20040000 CFAR: c0000000000098b0 IRQMASK: 0 GPR00: c00000000006c964 c0000000fffb3cf0 c000000001513800 0000000000000000 GPR04: 0000000048ab0778 0000000042000000 0000000000000000 0000000000001299 GPR08: 000001e447c718ec 0000000022424282 0000000000002710 c00000000006bee8 GPR12: 9000000000009033 c0000000016b0000 00000000000000b0 0000000000000001 GPR16: 0000000000000000 0000000000000002 0000000000000000 0000000000000ff8 GPR20: 0000000000001fff 0000000000000007 0000000000000080 00007fff89d90158 GPR24: 0000000002000000 0000000002000000 0000000000000255 0000000000000300 GPR28: c000000001270000 0000000042000000 0000000048ab0778 c000000080647e80 NIP [7265677368657265] 0x7265677368657265 LR [c00000000006c0c8] ___do_page_fault+0x3f8/0xb10 Call Trace: [c0000000fffb3cf0] [c00000000000bdac] soft_nmi_common+0x13c/0x1d0 (unreliable) --- interrupt: 700 at decrementer_common_virt+0xb8/0x230 NIP: c0000000000098b8 LR: c00000000006c0c8 CTR: c0000000000097f0 REGS: c0000000fffb3d60 TRAP: 0700 Not tainted MSR: 9000000000021031 CR: 22424282 XER: 20040000 CFAR: c0000000000098b0 IRQMASK: 0 GPR00: c00000000006c964 0000000000002400 c000000001513800 0000000000000000 GPR04: 0000000048ab0778 0000000042000000 0000000000000000 0000000000001299 GPR08: 000001e447c718ec 0000000022424282 0000000000002710 c00000000006bee8 GPR12: 9000000000009033 c0000000016b0000 00000000000000b0 0000000000000001 GPR16: 0000000000000000 0000000000000002 0000000000000000 0000000000000ff8 GPR20: 0000000000001fff 0000000000000007 0000000000000080 00007fff89d90158 GPR24: 0000000002000000 0000000002000000 0000000000000255 0000000000000300 GPR28: c000000001270000 0000000042000000 0000000048ab0778 c000000080647e80 NIP [c0000000000098b8] decrementer_common_virt+0xb8/0x230 LR [c00000000006c0c8] ___do_page_fault+0x3f8/0xb10 --- interrupt: 700 Instruction dump: XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX ---[ end trace 6d28218e0cc3c949 ]--- After: ------------[ cut here ]------------ kernel BUG at arch/powerpc/kernel/exceptions-64s.S:491! 
Oops: Exception in kernel mode, sig: 5 [#1] LE PAGE_SIZE=64K MMU=Radix SMP NR_CPUS=2048 NUMA PowerNV Modules linked in: CPU: 0 PID: 88 Comm: login Not tainted 5.15.0-rc2-00034-ge057cdade6e5-dirty #2638 NIP: c0000000000098b8 LR: c00000000006bf04 CTR: c0000000000097f0 REGS: c0000000fffb3d60 TRAP: 0700 Not tainted MSR: 9000000000021031 CR: 24482227 XER: 00040000 CFAR: c0000000000098b0 IRQMASK: 0 GPR00: c00000000006bf04 0000000000002400 c000000001513800 c000000001271868 GPR04: 00000000100f0d29 0000000042000000 0000000000000007 0000000000000009 GPR08: 00000000100f0d29 0000000024482227 0000000000002710 c000000000181b3c GPR12: 9000000000009033 c0000000016b0000 00000000100f0d29 c000000005b22f00 GPR16: 00000000ffff0000 0000000000000001 0000000000000009 00000000100eed90 GPR20: 00000000100eed90 0000000010000000 000000001000a49c 00000000100f1430 GPR24: c000000001271868 0000000002000000 0000000000000215 0000000000000300 GPR28: c000000001271800 0000000042000000 00000000100f0d29 c000000080647860 NIP [c0000000000098b8] decrementer_common_virt+0xb8/0x230 LR [c00000000006bf04] ___do_page_fault+0x234/0xb10 Call Trace: Instruction dump: 4182000c 39400001 48000008 894d0932 714a0001 39400008 408225fc 718a4000 7c2a0b78 3821fcf0 41c20008 e82d0910 <0981fcf0> f92101a0 f9610170 f9810178 ---[ end trace a5dbd1f5ea4ccc51 ]--- Fixes: 0a882e28468f4 ("powerpc/64s/exception: remove bad stack branch") Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20211004145642.1331214-2-npiggin@gmail.com --- arch/powerpc/kernel/exceptions-64s.S | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 37859e62a8dc..024d9231f88c 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1665,27 +1665,30 @@ EXC_COMMON_BEGIN(program_check_common) */ andi. r10,r12,MSR_PR - bne 2f /* If userspace, go normal path */ + bne .Lnormal_stack /* If userspace, go normal path */ andis. r10,r12,(SRR1_PROGTM)@h - bne 1f /* If TM, emergency */ + bne .Lemergency_stack /* If TM, emergency */ cmpdi r1,-INT_FRAME_SIZE /* check if r1 is in userspace */ - blt 2f /* normal path if not */ + blt .Lnormal_stack /* normal path if not */ /* Use the emergency stack */ -1: andi. r10,r12,MSR_PR /* Set CR0 correctly for label */ +.Lemergency_stack: + andi. r10,r12,MSR_PR /* Set CR0 correctly for label */ /* 3 in EXCEPTION_PROLOG_COMMON */ mr r10,r1 /* Save r1 */ ld r1,PACAEMERGSP(r13) /* Use emergency stack */ subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */ __ISTACK(program_check)=0 __GEN_COMMON_BODY program_check - b 3f -2: + b .Ldo_program_check + +.Lnormal_stack: __ISTACK(program_check)=1 __GEN_COMMON_BODY program_check -3: + +.Ldo_program_check: addi r3,r1,STACK_FRAME_OVERHEAD bl program_check_exception REST_NVGPRS(r1) /* instruction emulation may change GPRs */ From d0afd44c05f8f4e4c91487c02d43c87a31552462 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 5 Oct 2021 00:56:39 +1000 Subject: [PATCH 13/19] powerpc/traps: do not enable irqs in _exception _exception can be called by machine check handlers when the MCE hits user code (e.g., pseries and powernv). This will enable local irqs because, which is a dicey thing to do in NMI or hard irq context. This seemed to worked out okay because a userspace MCE can basically be treated like a synchronous interrupt (after async / imprecise MCEs are filtered out). 
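To make the hazard concrete, the shape of the pattern being removed is roughly the following. This is a simplified, illustrative sketch rather than the kernel code itself: the function name is made up, while arch_irqs_disabled(), interrupt_cond_local_irq_enable() and show_signal_msg() are the real helpers visible in the diff below.

/* Illustrative sketch only; in the kernel this logic lives in exception_common(),
 * reached via _exception(). */
static void report_user_fault_sketch(int signr, struct pt_regs *regs, int code,
				     unsigned long addr)
{
	/*
	 * Unsafe: this path can be reached from a machine check handler
	 * running in NMI/hard-irq context, where enabling local interrupts
	 * breaks the kernel's irq-state accounting.
	 */
	if (arch_irqs_disabled())
		interrupt_cond_local_irq_enable(regs);

	show_signal_msg(signr, regs, code, addr);
	/* ... record trap_nr and queue the signal, none of which needs irqs on ... */
}

The fix below simply drops the conditional enable: reporting and signal delivery work fine with interrupts left off, and any handler that really wants interrupts enabled already does that itself.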
Since NMI and hard irq handlers have started growing nmi_enter / irq_enter, and more irq state sanity checks, this has started to cause problems (or at least trigger warnings). The Fixes tag is to the commit which introduced this, rather than trying to work out exactly which commit was the first that could possibly cause a problem, because that may be difficult to prove. Fixes: 9f2f79e3a3c1 ("powerpc: Disable interrupts in 64-bit kernel FP and vector faults") Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20211004145642.1331214-3-npiggin@gmail.com --- arch/powerpc/kernel/traps.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index aac8c0412ff9..e453b666613b 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -340,10 +340,16 @@ static bool exception_common(int signr, struct pt_regs *regs, int code, return false; } - show_signal_msg(signr, regs, code, addr); + /* + * Must not enable interrupts even for user-mode exception, because + * this can be called from machine check, which may be a NMI or IRQ + * which don't like interrupts being enabled. Could check for + * in_hardirq || in_nmi perhaps, but there doesn't seem to be a good + * reason why _exception() should enable irqs for an exception handler, + * the handlers themselves do that directly. + */ - if (arch_irqs_disabled()) - interrupt_cond_local_irq_enable(regs); + show_signal_msg(signr, regs, code, addr); current->thread.trap_nr = code; From ff058a8ada5df0d84e5537cfaf89d06d71501580 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 5 Oct 2021 00:56:40 +1000 Subject: [PATCH 14/19] powerpc/64: warn if local irqs are enabled in NMI or hardirq context This can help catch bugs such as the one fixed by the previous change to prevent _exception() from enabling irqs. ppc32 could have a similar warning but it has no good config option to debug this stuff (the test may be overkill to add for production kernels). Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20211004145642.1331214-4-npiggin@gmail.com --- arch/powerpc/kernel/irq.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 551b653228c4..c4f1d6b7d992 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -229,6 +229,9 @@ notrace void arch_local_irq_restore(unsigned long mask) return; } + if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) + WARN_ON_ONCE(in_nmi() || in_hardirq()); + /* * After the stb, interrupts are unmasked and there are no interrupts * pending replay. The restart sequence makes this atomic with @@ -321,6 +324,9 @@ notrace void arch_local_irq_restore(unsigned long mask) if (mask) return; + if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) + WARN_ON_ONCE(in_nmi() || in_hardirq()); + /* * From this point onward, we can take interrupts, preempt, * etc... unless we got hard-disabled. We check if an event From 768c47010392ece9766a56479b4e0cf04a536916 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 5 Oct 2021 00:56:41 +1000 Subject: [PATCH 15/19] powerpc/64/interrupt: Reconcile soft-mask state in NMI and fix false BUG If an NMI hits early in an interrupt handler before the irq soft-mask state is reconciled, that can cause a false-positive BUG with a CONFIG_PPC_IRQ_SOFT_MASK_DEBUG assertion.
Remove that assertion and instead check the case that if regs->msr has EE clear, then regs->softe should be marked as disabled so the irq state looks correct to NMI handlers, the same as how it's fixed up in the case it was implicit soft-masked. This doesn't fix a known problem -- the change that was fixed by commit 4ec5feec1ad02 ("powerpc/64s: Make NMI record implicitly soft-masked code as irqs disabled") was the addition of a warning in the soft-nmi watchdog interrupt which can never actually fire when MSR[EE]=0. However it may be important if NMI handlers grow more code, and it's less surprising to anything using 'regs' - (I tripped over this when working in the area). Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20211004145642.1331214-5-npiggin@gmail.com --- arch/powerpc/include/asm/interrupt.h | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h index 6b800d3e2681..b894b7169706 100644 --- a/arch/powerpc/include/asm/interrupt.h +++ b/arch/powerpc/include/asm/interrupt.h @@ -265,13 +265,16 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte local_paca->irq_soft_mask = IRQS_ALL_DISABLED; local_paca->irq_happened |= PACA_IRQ_HARD_DIS; - if (is_implicit_soft_masked(regs)) { - // Adjust regs->softe soft implicit soft-mask, so - // arch_irq_disabled_regs(regs) behaves as expected. + if (!(regs->msr & MSR_EE) || is_implicit_soft_masked(regs)) { + /* + * Adjust regs->softe to be soft-masked if it had not been + * reconcied (e.g., interrupt entry with MSR[EE]=0 but softe + * not yet set disabled), or if it was in an implicit soft + * masked state. This makes arch_irq_disabled_regs(regs) + * behave as expected. + */ regs->softe = IRQS_ALL_DISABLED; } - if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) - BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE)); /* Don't do any per-CPU operations until interrupt state is fixed */ From f08fb25bc66986b0952724530a640d9970fa52c1 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 5 Oct 2021 00:56:42 +1000 Subject: [PATCH 16/19] powerpc/64s: Fix unrecoverable MCE calling async handler from NMI MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The machine check handler is not considered NMI on 64s. The early handler is the true NMI handler, and then it schedules the machine_check_exception handler to run when interrupts are enabled. This works fine except the case of an unrecoverable MCE, where the true NMI is taken when MSR[RI] is clear, it can not recover, so it calls machine_check_exception directly so something might be done about it. Calling an async handler from NMI context can result in irq state and other things getting corrupted. This can also trigger the BUG at arch/powerpc/include/asm/interrupt.h:168 BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE)); Fix this by making an _async version of the handler which is called in the normal case, and a NMI version that is called for unrecoverable interrupts. 
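Condensed from the diff that follows, purely for readability, the resulting structure is a shared body with two thin entry points (the CONFIG_PPC_BOOK3S_64 guard around the async variant is omitted in this sketch):

static void __machine_check_exception(struct pt_regs *regs)
{
	/* attempt recovery, log the event, die_mce() if unrecoverable */
}

/* Normal path: queued by the early real-mode handler and run once
 * interrupts can be enabled again. */
DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async)
{
	__machine_check_exception(regs);
}

/* Unrecoverable path: called directly from the early handler, which is a
 * true NMI, so it must use the NMI wrapper. */
DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
{
	__machine_check_exception(regs);

	return 0;
}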
Fixes: 2b43dd7653cc ("powerpc/64: enable MSR[EE] in irq replay pt_regs") Signed-off-by: Nicholas Piggin Tested-by: Cédric Le Goater Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20211004145642.1331214-6-npiggin@gmail.com --- arch/powerpc/include/asm/interrupt.h | 5 ++--- arch/powerpc/kernel/exceptions-64s.S | 8 +++++-- arch/powerpc/kernel/traps.c | 31 ++++++++++++++++------------ 3 files changed, 26 insertions(+), 18 deletions(-) diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h index b894b7169706..a1d238255f07 100644 --- a/arch/powerpc/include/asm/interrupt.h +++ b/arch/powerpc/include/asm/interrupt.h @@ -528,10 +528,9 @@ static __always_inline long ____##func(struct pt_regs *regs) /* kernel/traps.c */ DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception); #ifdef CONFIG_PPC_BOOK3S_64 -DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception); -#else -DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception); +DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async); #endif +DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception); DECLARE_INTERRUPT_HANDLER(SMIException); DECLARE_INTERRUPT_HANDLER(handle_hmi_exception); DECLARE_INTERRUPT_HANDLER(unknown_exception); diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 024d9231f88c..eaf1f72131a1 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1243,7 +1243,7 @@ EXC_COMMON_BEGIN(machine_check_common) li r10,MSR_RI mtmsrd r10,1 addi r3,r1,STACK_FRAME_OVERHEAD - bl machine_check_exception + bl machine_check_exception_async b interrupt_return_srr @@ -1303,7 +1303,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) subi r12,r12,1 sth r12,PACA_IN_MCE(r13) - /* Invoke machine_check_exception to print MCE event and panic. */ + /* + * Invoke machine_check_exception to print MCE event and panic. + * This is the NMI version of the handler because we are called from + * the early handler which is a true NMI. + */ addi r3,r1,STACK_FRAME_OVERHEAD bl machine_check_exception diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index e453b666613b..11741703d26e 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -796,24 +796,22 @@ void die_mce(const char *str, struct pt_regs *regs, long err) * do_exit() checks for in_interrupt() and panics in that case, so * exit the irq/nmi before calling die. */ - if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) - irq_exit(); - else + if (in_nmi()) nmi_exit(); + else + irq_exit(); die(str, regs, err); } /* - * BOOK3S_64 does not call this handler as a non-maskable interrupt + * BOOK3S_64 does not usually call this handler as a non-maskable interrupt * (it uses its own early real-mode handler to handle the MCE proper * and then raises irq_work to call this handler when interrupts are - * enabled). + * enabled). The only time when this is not true is if the early handler + * is unrecoverable, then it does call this directly to try to get a + * message out. 
*/ -#ifdef CONFIG_PPC_BOOK3S_64 -DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception) -#else -DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception) -#endif +static void __machine_check_exception(struct pt_regs *regs) { int recover = 0; @@ -847,12 +845,19 @@ bail: /* Must die if the interrupt is not recoverable */ if (regs_is_unrecoverable(regs)) die_mce("Unrecoverable Machine check", regs, SIGBUS); +} #ifdef CONFIG_PPC_BOOK3S_64 - return; -#else - return 0; +DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async) +{ + __machine_check_exception(regs); +} #endif +DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception) +{ + __machine_check_exception(regs); + + return 0; } DEFINE_INTERRUPT_HANDLER(SMIException) /* async? */ From 5a4b0320783a19f877dd595813569b3c25f4ff81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?= Date: Thu, 30 Sep 2021 12:25:35 +0200 Subject: [PATCH 17/19] powerpc/pseries/msi: Add an empty irq_write_msi_msg() handler MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The IPR driver tests for MSI support at probe time with MSI vector 0 and, when done, frees the IRQ with free_irq(). This test was introduced by 95fecd90397e ("ipr: add test for MSI interrupt support") as an improvement of commit 5a9ef25b14d3 ("[SCSI] ipr: add MSI support") because a boot failure was reported on a Bimini PowerPC system: https://lore.kernel.org/r/1242926159.3007.5.camel@localhost.localdomain It was finally decided to remove MSI support on Bimini systems in 6eb0ac03899a ("powerpc/maple: Add a quirk to disable MSI for IPR on Bimini"). Linux 5.15-rc1 added MSI domain support to the pseries machine, and when free_irq() is called in the driver, msi_domain_deactivate() is also called. This resets the MSI table entry of the associated vector by calling __pci_write_msi_msg() with an empty message and breaks any further activation of the same vector. In the case of the IPR driver, it breaks the initialization sequence of the IOA. Introduce an empty irq_write_msi_msg() handler in the MSI domain of the pseries machine to avoid clearing the MSI vector entry. Updating the entry is not strictly necessary since it is initialized by the underlying hypervisor, PowerVM or QEMU/KVM. Fixes: a5f3d2c17b07 ("powerpc/pseries/pci: Add MSI domains") Signed-off-by: Cédric Le Goater Reported-by: Abdul Haleem Tested-by: Mahesh Salgaonkar [mpe: Tweak comment wording and formatting slightly] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20210930102535.1047230-1-clg@kaod.org --- arch/powerpc/platforms/pseries/msi.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index 1b305e411862..8627362f613e 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c @@ -507,12 +507,27 @@ static void pseries_msi_unmask(struct irq_data *d) irq_chip_unmask_parent(d); } +static void pseries_msi_write_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct msi_desc *entry = irq_data_get_msi_desc(data); + + /* + * Do not update the MSIx vector table. It's not strictly necessary + * because the table is initialized by the underlying hypervisor, PowerVM + * or QEMU/KVM. However, if the MSIx vector entry is cleared, any further + * activation will fail. This can happen in some drivers (eg. IPR) which + * deactivate an IRQ used for testing MSI support.
+ */ + entry->msg = *msg; +} + static struct irq_chip pseries_pci_msi_irq_chip = { .name = "pSeries-PCI-MSI", .irq_shutdown = pseries_msi_shutdown, .irq_mask = pseries_msi_mask, .irq_unmask = pseries_msi_unmask, .irq_eoi = irq_chip_eoi_parent, + .irq_write_msi_msg = pseries_msi_write_msg, }; static struct msi_domain_info pseries_msi_domain_info = { From d93f9e23744b7bf11a98b2ddb091d129482ae179 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 15 Sep 2021 16:12:24 +0200 Subject: [PATCH 18/19] powerpc/32s: Fix kuap_kernel_restore() At interrupt exit, kuap_kernel_restore() calls kuap_unlock() with the value contained in regs->kuap. However, when regs->kuap contains 0xffffffff it means that KUAP was not unlocked, so calling kuap_unlock() is irrelevant and results in jeopardising the contents of kernel space segment registers. So check that regs->kuap doesn't contain KUAP_NONE before calling kuap_unlock(). In the meantime it also means that if KUAP has not been correctly locked back at interrupt exit, it must be locked before continuing. This is done by checking the content of current->thread.kuap which was returned by kuap_get_and_assert_locked(). Fixes: 16132529cee5 ("powerpc/32s: Rework Kernel Userspace Access Protection") Reported-by: Stan Johnson Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/0d0c4d0f050a637052287c09ba521bad960a2790.1631715131.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/book3s/32/kup.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h index d4b145b279f6..9f38040f0641 100644 --- a/arch/powerpc/include/asm/book3s/32/kup.h +++ b/arch/powerpc/include/asm/book3s/32/kup.h @@ -136,6 +136,14 @@ static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap) if (kuap_is_disabled()) return; + if (unlikely(kuap != KUAP_NONE)) { + current->thread.kuap = KUAP_NONE; + kuap_lock(kuap, false); + } + + if (likely(regs->kuap == KUAP_NONE)) + return; + current->thread.kuap = regs->kuap; kuap_unlock(regs->kuap, false); From eb8257a12192f43ffd41bd90932c39dade958042 Mon Sep 17 00:00:00 2001 From: Mahesh Salgaonkar Date: Mon, 20 Sep 2021 22:03:26 +0530 Subject: [PATCH 19/19] pseries/eeh: Fix the kdump kernel crash during eeh_pseries_init On a pseries LPAR, when an empty slot is assigned to the partition, or in single LPAR mode, the kdump kernel crashes while issuing the PHB reset. In the kdump scenario, we traverse all PHBs and issue reset using the pe_config_addr of the first child device present under each PHB. However the code assumes that none of the PHB slots can be empty and uses list_first_entry() to get the first child device under the PHB. Since list_first_entry() expects the list to be non-empty, it returns an invalid pci_dn entry and ends up accessing a NULL phb pointer under pci_dn->phb, causing the kdump kernel to crash. This patch fixes the below kdump kernel crash by skipping empty slots: audit: initializing netlink subsys (disabled) thermal_sys: Registered thermal governor 'fair_share' thermal_sys: Registered thermal governor 'step_wise' cpuidle: using governor menu pstore: Registered nvram as persistent store backend Issue PHB reset ...
audit: type=2000 audit(1631267818.000:1): state=initialized audit_enabled=0 res=1 BUG: Kernel NULL pointer dereference on read at 0x00000268 Faulting instruction address: 0xc000000008101fb0 Oops: Kernel access of bad area, sig: 7 [#1] LE PAGE_SIZE=64K MMU=Radix SMP NR_CPUS=2048 NUMA pSeries Modules linked in: CPU: 7 PID: 1 Comm: swapper/7 Not tainted 5.14.0 #1 NIP: c000000008101fb0 LR: c000000009284ccc CTR: c000000008029d70 REGS: c00000001161b840 TRAP: 0300 Not tainted (5.14.0) MSR: 8000000002009033 CR: 28000224 XER: 20040002 CFAR: c000000008101f0c DAR: 0000000000000268 DSISR: 00080000 IRQMASK: 0 ... NIP pseries_eeh_get_pe_config_addr+0x100/0x1b0 LR __machine_initcall_pseries_eeh_pseries_init+0x2cc/0x350 Call Trace: 0xc00000001161bb80 (unreliable) __machine_initcall_pseries_eeh_pseries_init+0x2cc/0x350 do_one_initcall+0x60/0x2d0 kernel_init_freeable+0x350/0x3f8 kernel_init+0x3c/0x17c ret_from_kernel_thread+0x5c/0x64 Fixes: 5a090f7c363fd ("powerpc/pseries: PCIE PHB reset") Signed-off-by: Mahesh Salgaonkar [mpe: Tweak wording and trim oops] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/163215558252.413351.8600189949820258982.stgit@jupiter --- arch/powerpc/platforms/pseries/eeh_pseries.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index bc15200852b7..09fafcf2d3a0 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -867,6 +867,10 @@ static int __init eeh_pseries_init(void) if (is_kdump_kernel() || reset_devices) { pr_info("Issue PHB reset ...\n"); list_for_each_entry(phb, &hose_list, list_node) { + // Skip if the slot is empty + if (list_empty(&PCI_DN(phb->dn)->child_list)) + continue; + pdn = list_first_entry(&PCI_DN(phb->dn)->child_list, struct pci_dn, list); config_addr = pseries_eeh_get_pe_config_addr(pdn);
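As a closing aside on why the unguarded list_first_entry() is fatal here: for an empty list the head points back at itself, so the macro hands back the head re-interpreted as a pci_dn, and any field read through it (such as pci_dn->phb) is garbage. The following is a small standalone illustration in plain userspace C, with hand-rolled stand-ins for the kernel list macros; it is not kernel code.

/* Standalone illustration of why list_first_entry() must be guarded by
 * list_empty() when the list may have no members. */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_first_entry(head, type, member) \
	container_of((head)->next, type, member)
#define list_empty(head) ((head)->next == (head))

struct pci_dn_like {
	void *phb;			/* stands in for pci_dn::phb */
	struct list_head list;
};

int main(void)
{
	/* An empty child list: next and prev both point at the head itself. */
	struct list_head child_list = { &child_list, &child_list };

	if (list_empty(&child_list)) {
		/* This is the guard the patch adds: skip empty slots. */
		printf("slot is empty, skipping\n");
	} else {
		/* Without the guard, pdn would alias the list head and
		 * pdn->phb would be read from unrelated memory. */
		struct pci_dn_like *pdn =
			list_first_entry(&child_list, struct pci_dn_like, list);
		printf("first child phb=%p\n", pdn->phb);
	}
	return 0;
}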