powerpc fixes for 5.15 #3
Merge tag 'powerpc-5.15-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
 "A bit of a big batch, partly because I didn't send any last week, and
  also just because the BPF fixes happened to land this week.

  Summary:

   - Fix a regression hit by the IPR SCSI driver, introduced by the
     recent addition of MSI domains on pseries.

   - A big series including 8 BPF fixes, some with potential security
     impact and the rest various code generation issues.

   - Fix our program check assembler entry path, which was accidentally
     jumping into a gas macro and generating strange stack frames, which
     could confuse find_bug().

   - A couple of fixes, and related changes, to fix corner cases in our
     machine check handling.

   - Fix our DMA IOMMU ops, which were not always returning the optimal
     DMA mask, leading to at least one device falling back to 32-bit DMA
     when it shouldn't.

   - A fix for KUAP handling on 32-bit Book3S.

   - Fix crashes seen when kdumping on some pseries systems.

  Thanks to Naveen N. Rao, Nicholas Piggin, Alexey Kardashevskiy, Cédric
  Le Goater, Christophe Leroy, Mahesh Salgaonkar, Abdul Haleem, Christoph
  Hellwig, Johan Almbladh, and Stan Johnson"

* tag 'powerpc-5.15-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  pseries/eeh: Fix the kdump kernel crash during eeh_pseries_init
  powerpc/32s: Fix kuap_kernel_restore()
  powerpc/pseries/msi: Add an empty irq_write_msi_msg() handler
  powerpc/64s: Fix unrecoverable MCE calling async handler from NMI
  powerpc/64/interrupt: Reconcile soft-mask state in NMI and fix false BUG
  powerpc/64: warn if local irqs are enabled in NMI or hardirq context
  powerpc/traps: do not enable irqs in _exception
  powerpc/64s: fix program check interrupt emergency stack path
  powerpc/bpf ppc32: Fix BPF_SUB when imm == 0x80000000
  powerpc/bpf ppc32: Do not emit zero extend instruction for 64-bit BPF_END
  powerpc/bpf ppc32: Fix JMP32_JSET_K
  powerpc/bpf ppc32: Fix ALU32 BPF_ARSH operation
  powerpc/bpf: Emit stf barrier instruction sequences for BPF_NOSPEC
  powerpc/security: Add a helper to query stf_barrier type
  powerpc/bpf: Fix BPF_SUB when imm == 0x80000000
  powerpc/bpf: Fix BPF_MOD when imm == 1
  powerpc/bpf: Validate branch ranges
  powerpc/lib: Add helper to check if offset is within conditional branch range
  powerpc/iommu: Report the correct most efficient DMA mask for PCI devices
commit efb52a7d95
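A note on the two BPF_SUB fixes listed above: the JITs lowered "dst -= imm" by negating imm and emitting an add, and imm == 0x80000000 is exactly the 32-bit value whose negation does not exist. Below is a minimal userspace sketch of that edge case (plain C, not kernel code; variable names are made up), using unsigned arithmetic to model the wrap-around:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative sketch: why lowering BPF_SUB as "negate imm, then add"
 * breaks for imm == 0x80000000. -INT32_MIN is not representable in
 * 32 bits, so the negation wraps back to INT32_MIN and the JIT would
 * *add* -2^31 where the program asked it to *subtract* -2^31
 * (visible in the 64-bit ALU case, where imm is sign-extended).
 */
int main(void)
{
    int32_t imm = INT32_MIN;          /* BPF immediate 0x80000000 */
    int64_t dst = 0;

    /* What BPF semantics require: dst -= (s64)imm, i.e. dst + 2^31 */
    int64_t want = dst - (int64_t)imm;

    /* What negate-and-add produces; unsigned math models the wrap */
    int32_t negated = (int32_t)(0u - (uint32_t)imm); /* still INT32_MIN */
    int64_t got = dst + (int64_t)negated;

    printf("want=%lld got=%lld\n", (long long)want, (long long)got);
    /* prints: want=2147483648 got=-2147483648 */
    return 0;
}

Hence the fixes route SUB through a dedicated subtract path instead of reusing the ADD path with a negated immediate.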
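The JMP32_JSET_K fix is a similar immediate-range subtlety: ppc's andi. instruction zero-extends its 16-bit immediate, so it only matches BPF's "dst & (u32)imm" semantics for immediates in [0, 32768), while the old range check also accepted negative values. A small illustrative sketch (again userspace C with made-up values):

#include <stdint.h>
#include <stdio.h>

/*
 * BPF JMP32_JSET_K tests dst & (u32)imm; andi. tests only the
 * zero-extended low 16 bits of imm. For negative imm they disagree.
 */
int main(void)
{
    uint32_t dst = 0x00010000;
    int32_t imm = -1;

    uint32_t bpf_test  = dst & (uint32_t)imm;            /* 0x10000: branch taken */
    uint32_t andi_test = dst & ((uint32_t)imm & 0xffff); /* 0: branch not taken */

    printf("bpf=%#x andi=%#x\n", bpf_test, andi_test);
    return 0;
}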
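On the DMA mask item: drivers size their DMA masks from dma_get_required_mask(), so a platform that under-reports the mask steers them to 32-bit DMA. A hypothetical probe-path fragment showing the affected pattern (sketch only, not a complete driver; example_setup_dma is an invented name):

#include <linux/dma-mapping.h>

/*
 * Drivers like ipr consult dma_get_required_mask() to decide whether
 * enabling 64-bit DMA is worthwhile; if the platform under-reports
 * the mask (as the old dma_iommu_get_required_mask() could), this
 * pattern silently settles for 32-bit DMA.
 */
static int example_setup_dma(struct device *dev)
{
    if (dma_get_required_mask(dev) > DMA_BIT_MASK(32))
        return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));

    return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}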
@@ -136,6 +136,14 @@ static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
    if (kuap_is_disabled())
        return;

    if (unlikely(kuap != KUAP_NONE)) {
        current->thread.kuap = KUAP_NONE;
        kuap_lock(kuap, false);
    }

    if (likely(regs->kuap == KUAP_NONE))
        return;

    current->thread.kuap = regs->kuap;

    kuap_unlock(regs->kuap, false);
@@ -23,6 +23,7 @@
#define BRANCH_ABSOLUTE 0x2

bool is_offset_in_branch_range(long offset);
bool is_offset_in_cond_branch_range(long offset);
int create_branch(struct ppc_inst *instr, const u32 *addr,
                  unsigned long target, int flags);
int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
@@ -265,13 +265,16 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
    local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
    local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

    if (is_implicit_soft_masked(regs)) {
        // Adjust regs->softe soft implicit soft-mask, so
        // arch_irq_disabled_regs(regs) behaves as expected.
    if (!(regs->msr & MSR_EE) || is_implicit_soft_masked(regs)) {
        /*
         * Adjust regs->softe to be soft-masked if it had not been
         * reconciled (e.g., interrupt entry with MSR[EE]=0 but softe
         * not yet set disabled), or if it was in an implicit soft
         * masked state. This makes arch_irq_disabled_regs(regs)
         * behave as expected.
         */
        regs->softe = IRQS_ALL_DISABLED;
    }
    if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
        BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));

    /* Don't do any per-CPU operations until interrupt state is fixed */

@@ -525,10 +528,9 @@ static __always_inline long ____##func(struct pt_regs *regs)
/* kernel/traps.c */
DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
#ifdef CONFIG_PPC_BOOK3S_64
DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception);
#else
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async);
#endif
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
DECLARE_INTERRUPT_HANDLER(SMIException);
DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
DECLARE_INTERRUPT_HANDLER(unknown_exception);
@@ -39,6 +39,11 @@ static inline bool security_ftr_enabled(u64 feature)
    return !!(powerpc_security_features & feature);
}

#ifdef CONFIG_PPC_BOOK3S_64
enum stf_barrier_type stf_barrier_type_get(void);
#else
static inline enum stf_barrier_type stf_barrier_type_get(void) { return STF_BARRIER_NONE; }
#endif

// Features indicating support for Spectre/Meltdown mitigations
@@ -184,6 +184,15 @@ u64 dma_iommu_get_required_mask(struct device *dev)
    struct iommu_table *tbl = get_iommu_table_base(dev);
    u64 mask;

    if (dev_is_pci(dev)) {
        u64 bypass_mask = dma_direct_get_required_mask(dev);

        if (dma_iommu_dma_supported(dev, bypass_mask)) {
            dev_info(dev, "%s: returning bypass mask 0x%llx\n", __func__, bypass_mask);
            return bypass_mask;
        }
    }

    if (!tbl)
        return 0;
@@ -1243,7 +1243,7 @@ EXC_COMMON_BEGIN(machine_check_common)
    li r10,MSR_RI
    mtmsrd r10,1
    addi r3,r1,STACK_FRAME_OVERHEAD
    bl machine_check_exception
    bl machine_check_exception_async
    b interrupt_return_srr

@@ -1303,7 +1303,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
    subi r12,r12,1
    sth r12,PACA_IN_MCE(r13)

    /* Invoke machine_check_exception to print MCE event and panic. */
    /*
     * Invoke machine_check_exception to print MCE event and panic.
     * This is the NMI version of the handler because we are called from
     * the early handler which is a true NMI.
     */
    addi r3,r1,STACK_FRAME_OVERHEAD
    bl machine_check_exception

@@ -1665,27 +1669,30 @@ EXC_COMMON_BEGIN(program_check_common)
     */

    andi. r10,r12,MSR_PR
    bne 2f /* If userspace, go normal path */
    bne .Lnormal_stack /* If userspace, go normal path */

    andis. r10,r12,(SRR1_PROGTM)@h
    bne 1f /* If TM, emergency */
    bne .Lemergency_stack /* If TM, emergency */

    cmpdi r1,-INT_FRAME_SIZE /* check if r1 is in userspace */
    blt 2f /* normal path if not */
    blt .Lnormal_stack /* normal path if not */

    /* Use the emergency stack */
1:  andi. r10,r12,MSR_PR /* Set CR0 correctly for label */
.Lemergency_stack:
    andi. r10,r12,MSR_PR /* Set CR0 correctly for label */
    /* 3 in EXCEPTION_PROLOG_COMMON */
    mr r10,r1 /* Save r1 */
    ld r1,PACAEMERGSP(r13) /* Use emergency stack */
    subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
    __ISTACK(program_check)=0
    __GEN_COMMON_BODY program_check
    b 3f
2:
    b .Ldo_program_check

.Lnormal_stack:
    __ISTACK(program_check)=1
    __GEN_COMMON_BODY program_check
3:

.Ldo_program_check:
    addi r3,r1,STACK_FRAME_OVERHEAD
    bl program_check_exception
    REST_NVGPRS(r1) /* instruction emulation may change GPRs */
@@ -229,6 +229,9 @@ notrace void arch_local_irq_restore(unsigned long mask)
        return;
    }

    if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
        WARN_ON_ONCE(in_nmi() || in_hardirq());

    /*
     * After the stb, interrupts are unmasked and there are no interrupts
     * pending replay. The restart sequence makes this atomic with

@@ -321,6 +324,9 @@ notrace void arch_local_irq_restore(unsigned long mask)
    if (mask)
        return;

    if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
        WARN_ON_ONCE(in_nmi() || in_hardirq());

    /*
     * From this point onward, we can take interrupts, preempt,
     * etc... unless we got hard-disabled. We check if an event
@@ -263,6 +263,11 @@ static int __init handle_no_stf_barrier(char *p)

early_param("no_stf_barrier", handle_no_stf_barrier);

enum stf_barrier_type stf_barrier_type_get(void)
{
    return stf_enabled_flush_types;
}

/* This is the generic flag used by other architectures */
static int __init handle_ssbd(char *p)
{
@@ -340,10 +340,16 @@ static bool exception_common(int signr, struct pt_regs *regs, int code,
        return false;
    }

    show_signal_msg(signr, regs, code, addr);
    /*
     * Must not enable interrupts even for user-mode exception, because
     * this can be called from machine check, which may be a NMI or IRQ
     * which don't like interrupts being enabled. Could check for
     * in_hardirq || in_nmi perhaps, but there doesn't seem to be a good
     * reason why _exception() should enable irqs for an exception handler,
     * the handlers themselves do that directly.
     */

    if (arch_irqs_disabled())
        interrupt_cond_local_irq_enable(regs);
    show_signal_msg(signr, regs, code, addr);

    current->thread.trap_nr = code;

@@ -790,24 +796,22 @@ void die_mce(const char *str, struct pt_regs *regs, long err)
     * do_exit() checks for in_interrupt() and panics in that case, so
     * exit the irq/nmi before calling die.
     */
    if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
        irq_exit();
    else
    if (in_nmi())
        nmi_exit();
    else
        irq_exit();
    die(str, regs, err);
}

/*
 * BOOK3S_64 does not call this handler as a non-maskable interrupt
 * BOOK3S_64 does not usually call this handler as a non-maskable interrupt
 * (it uses its own early real-mode handler to handle the MCE proper
 * and then raises irq_work to call this handler when interrupts are
 * enabled).
 * enabled). The only time when this is not true is if the early handler
 * is unrecoverable, then it does call this directly to try to get a
 * message out.
 */
#ifdef CONFIG_PPC_BOOK3S_64
DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception)
#else
DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
#endif
static void __machine_check_exception(struct pt_regs *regs)
{
    int recover = 0;

@@ -841,12 +845,19 @@ bail:
    /* Must die if the interrupt is not recoverable */
    if (regs_is_unrecoverable(regs))
        die_mce("Unrecoverable Machine check", regs, SIGBUS);
}

#ifdef CONFIG_PPC_BOOK3S_64
    return;
#else
    return 0;
DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async)
{
    __machine_check_exception(regs);
}
#endif
DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
{
    __machine_check_exception(regs);

    return 0;
}

DEFINE_INTERRUPT_HANDLER(SMIException) /* async? */
@@ -228,6 +228,11 @@ bool is_offset_in_branch_range(long offset)
    return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
}

bool is_offset_in_cond_branch_range(long offset)
{
    return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3);
}

/*
 * Helper to check if a given instruction is a conditional branch
 * Derived from the conditional checks in analyse_instr()

@@ -280,7 +285,7 @@ int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
    offset = offset - (unsigned long)addr;

    /* Check we can represent the target in the instruction format */
    if (offset < -0x8000 || offset > 0x7FFF || offset & 0x3)
    if (!is_offset_in_cond_branch_range(offset))
        return 1;

    /* Mask out the flags and target, so they don't step on each other. */
@@ -24,16 +24,30 @@
#define EMIT(instr) PLANT_INSTR(image, ctx->idx, instr)

/* Long jump; (unconditional 'branch') */
#define PPC_JMP(dest) EMIT(PPC_INST_BRANCH | \
        (((dest) - (ctx->idx * 4)) & 0x03fffffc))
#define PPC_JMP(dest) \
    do { \
        long offset = (long)(dest) - (ctx->idx * 4); \
        if (!is_offset_in_branch_range(offset)) { \
            pr_err_ratelimited("Branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \
            return -ERANGE; \
        } \
        EMIT(PPC_INST_BRANCH | (offset & 0x03fffffc)); \
    } while (0)

/* blr; (unconditional 'branch' with link) to absolute address */
#define PPC_BL_ABS(dest) EMIT(PPC_INST_BL | \
        (((dest) - (unsigned long)(image + ctx->idx)) & 0x03fffffc))
/* "cond" here covers BO:BI fields. */
#define PPC_BCC_SHORT(cond, dest) EMIT(PPC_INST_BRANCH_COND | \
        (((cond) & 0x3ff) << 16) | \
        (((dest) - (ctx->idx * 4)) & \
        0xfffc))
#define PPC_BCC_SHORT(cond, dest) \
    do { \
        long offset = (long)(dest) - (ctx->idx * 4); \
        if (!is_offset_in_cond_branch_range(offset)) { \
            pr_err_ratelimited("Conditional branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \
            return -ERANGE; \
        } \
        EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc)); \
    } while (0)

/* Sign-extended 32-bit immediate load */
#define PPC_LI32(d, i) do { \
        if ((int)(uintptr_t)(i) >= -32768 && \

@@ -78,11 +92,6 @@
#define PPC_FUNC_ADDR(d,i) do { PPC_LI32(d, i); } while(0)
#endif

static inline bool is_nearbranch(int offset)
{
    return (offset < 32768) && (offset >= -32768);
}

/*
 * The fly in the ointment of code size changing from pass to pass is
 * avoided by padding the short branch case with a NOP. If code size differs

@@ -91,7 +100,7 @@ static inline bool is_nearbranch(int offset)
 * state.
 */
#define PPC_BCC(cond, dest) do { \
    if (is_nearbranch((dest) - (ctx->idx * 4))) { \
    if (is_offset_in_cond_branch_range((long)(dest) - (ctx->idx * 4))) { \
        PPC_BCC_SHORT(cond, dest); \
        EMIT(PPC_RAW_NOP()); \
    } else { \
@@ -16,18 +16,18 @@
 * with our redzone usage.
 *
 *              [ prev sp           ] <-------------
 *              [ nv gpr save area  ] 6*8          |
 *              [ nv gpr save area  ] 5*8          |
 *              [ tail_call_cnt     ] 8            |
 *              [ local_tmp_var     ] 8            |
 *              [ local_tmp_var     ] 16           |
 * fp (r31) --> [ ebpf stack space  ] upto 512     |
 *              [ frame header      ] 32/112       |
 * sp (r1) ---> [ stack pointer     ] --------------
 */

/* for gpr non volatile registers BPG_REG_6 to 10 */
#define BPF_PPC_STACK_SAVE (6*8)
#define BPF_PPC_STACK_SAVE (5*8)
/* for bpf JIT code internal usage */
#define BPF_PPC_STACK_LOCALS 16
#define BPF_PPC_STACK_LOCALS 24
/* stack frame excluding BPF stack, ensure this is quadword aligned */
#define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + \
        BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
@@ -210,7 +210,11 @@ skip_init_ctx:
    /* Now build the prologue, body code & epilogue for real. */
    cgctx.idx = 0;
    bpf_jit_build_prologue(code_base, &cgctx);
    bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass);
    if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass)) {
        bpf_jit_binary_free(bpf_hdr);
        fp = org_fp;
        goto out_addrs;
    }
    bpf_jit_build_epilogue(code_base, &cgctx);

    if (bpf_jit_enable > 1)
@@ -200,7 +200,7 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
    }
}

static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
    /*
     * By now, the eBPF program has already setup parameters in r3-r6

@@ -261,7 +261,9 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
    bpf_jit_emit_common_epilogue(image, ctx);

    EMIT(PPC_RAW_BCTR());

    /* out: */
    return 0;
}

/* Assemble the body code between the prologue & epilogue */

@@ -355,7 +357,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
        PPC_LI32(_R0, imm);
        EMIT(PPC_RAW_ADDC(dst_reg, dst_reg, _R0));
    }
    if (imm >= 0)
    if (imm >= 0 || (BPF_OP(code) == BPF_SUB && imm == 0x80000000))
        EMIT(PPC_RAW_ADDZE(dst_reg_h, dst_reg_h));
    else
        EMIT(PPC_RAW_ADDME(dst_reg_h, dst_reg_h));

@@ -623,7 +625,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
        EMIT(PPC_RAW_LI(dst_reg_h, 0));
        break;
    case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
        EMIT(PPC_RAW_SRAW(dst_reg_h, dst_reg, src_reg));
        EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
        break;
    case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
        bpf_set_seen_register(ctx, tmp_reg);

@@ -1073,7 +1075,7 @@ cond_branch:
        break;
    case BPF_JMP32 | BPF_JSET | BPF_K:
        /* andi does not sign-extend the immediate */
        if (imm >= -32768 && imm < 32768) {
        if (imm >= 0 && imm < 32768) {
            /* PPC_ANDI is _only/always_ dot-form */
            EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
        } else {

@@ -1090,7 +1092,9 @@ cond_branch:
     */
    case BPF_JMP | BPF_TAIL_CALL:
        ctx->seen |= SEEN_TAILCALL;
        bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
        ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
        if (ret < 0)
            return ret;
        break;

    default:

@@ -1103,7 +1107,7 @@ cond_branch:
        return -EOPNOTSUPP;
    }
    if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext &&
        !insn_is_zext(&insn[i + 1]))
        !insn_is_zext(&insn[i + 1]) && !(BPF_OP(code) == BPF_END && imm == 64))
        EMIT(PPC_RAW_LI(dst_reg_h, 0));
}
@@ -15,6 +15,7 @@
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>
#include <asm/security_features.h>

#include "bpf_jit64.h"

@@ -35,9 +36,9 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
 *              [ prev sp           ] <-------------
 *              [ ...               ]              |
 * sp (r1) ---> [ stack pointer     ] --------------
 *              [ nv gpr save area  ] 6*8
 *              [ nv gpr save area  ] 5*8
 *              [ tail_call_cnt     ] 8
 *              [ local_tmp_var     ] 8
 *              [ local_tmp_var     ] 16
 *              [ unused red zone   ] 208 bytes protected
 */
static int bpf_jit_stack_local(struct codegen_context *ctx)

@@ -45,12 +46,12 @@ static int bpf_jit_stack_local(struct codegen_context *ctx)
    if (bpf_has_stack_frame(ctx))
        return STACK_FRAME_MIN_SIZE + ctx->stack_size;
    else
        return -(BPF_PPC_STACK_SAVE + 16);
        return -(BPF_PPC_STACK_SAVE + 24);
}

static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
    return bpf_jit_stack_local(ctx) + 8;
    return bpf_jit_stack_local(ctx) + 16;
}

static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)

@@ -206,7 +207,7 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
    EMIT(PPC_RAW_BCTRL());
}

static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
    /*
     * By now, the eBPF program has already setup parameters in r3, r4 and r5

@@ -267,13 +268,38 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
    bpf_jit_emit_common_epilogue(image, ctx);

    EMIT(PPC_RAW_BCTR());

    /* out: */
    return 0;
}

/*
 * We spill into the redzone always, even if the bpf program has its own stackframe.
 * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
 */
void bpf_stf_barrier(void);

asm (
"   .global bpf_stf_barrier     ;"
"   bpf_stf_barrier:            ;"
"   std 21,-64(1)               ;"
"   std 22,-56(1)               ;"
"   sync                        ;"
"   ld 21,-64(1)                ;"
"   ld 22,-56(1)                ;"
"   ori 31,31,0                 ;"
"   .rept 14                    ;"
"   b 1f                        ;"
"   1:                          ;"
"   .endr                       ;"
"   blr                         ;"
);

/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
                       u32 *addrs, bool extra_pass)
{
    enum stf_barrier_type stf_barrier = stf_barrier_type_get();
    const struct bpf_insn *insn = fp->insnsi;
    int flen = fp->len;
    int i, ret;

@@ -328,18 +354,25 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
        EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
        goto bpf_alu32_trunc;
    case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
    case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
    case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
        if (!imm) {
            goto bpf_alu32_trunc;
        } else if (imm >= -32768 && imm < 32768) {
            EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
        } else {
            PPC_LI32(b2p[TMP_REG_1], imm);
            EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
        }
        goto bpf_alu32_trunc;
    case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
    case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
        if (BPF_OP(code) == BPF_SUB)
            imm = -imm;
        if (imm) {
            if (imm >= -32768 && imm < 32768)
                EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
            else {
                PPC_LI32(b2p[TMP_REG_1], imm);
                EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
            }
        if (!imm) {
            goto bpf_alu32_trunc;
        } else if (imm > -32768 && imm <= 32768) {
            EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
        } else {
            PPC_LI32(b2p[TMP_REG_1], imm);
            EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
        }
        goto bpf_alu32_trunc;
    case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */

@@ -389,8 +422,14 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
    case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
        if (imm == 0)
            return -EINVAL;
        else if (imm == 1)
            goto bpf_alu32_trunc;
        if (imm == 1) {
            if (BPF_OP(code) == BPF_DIV) {
                goto bpf_alu32_trunc;
            } else {
                EMIT(PPC_RAW_LI(dst_reg, 0));
                break;
            }
        }

        PPC_LI32(b2p[TMP_REG_1], imm);
        switch (BPF_CLASS(code)) {

@@ -631,6 +670,29 @@ emit_clear:
     * BPF_ST NOSPEC (speculation barrier)
     */
    case BPF_ST | BPF_NOSPEC:
        if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
            !security_ftr_enabled(SEC_FTR_STF_BARRIER))
            break;

        switch (stf_barrier) {
        case STF_BARRIER_EIEIO:
            EMIT(PPC_RAW_EIEIO() | 0x02000000);
            break;
        case STF_BARRIER_SYNC_ORI:
            EMIT(PPC_RAW_SYNC());
            EMIT(PPC_RAW_LD(b2p[TMP_REG_1], _R13, 0));
            EMIT(PPC_RAW_ORI(_R31, _R31, 0));
            break;
        case STF_BARRIER_FALLBACK:
            EMIT(PPC_RAW_MFLR(b2p[TMP_REG_1]));
            PPC_LI64(12, dereference_kernel_function_descriptor(bpf_stf_barrier));
            EMIT(PPC_RAW_MTCTR(12));
            EMIT(PPC_RAW_BCTRL());
            EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1]));
            break;
        case STF_BARRIER_NONE:
            break;
        }
        break;

    /*

@@ -993,7 +1055,9 @@ cond_branch:
     */
    case BPF_JMP | BPF_TAIL_CALL:
        ctx->seen |= SEEN_TAILCALL;
        bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
        ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
        if (ret < 0)
            return ret;
        break;

    default:
@@ -867,6 +867,10 @@ static int __init eeh_pseries_init(void)
    if (is_kdump_kernel() || reset_devices) {
        pr_info("Issue PHB reset ...\n");
        list_for_each_entry(phb, &hose_list, list_node) {
            // Skip if the slot is empty
            if (list_empty(&PCI_DN(phb->dn)->child_list))
                continue;

            pdn = list_first_entry(&PCI_DN(phb->dn)->child_list, struct pci_dn, list);
            config_addr = pseries_eeh_get_pe_config_addr(pdn);
@@ -507,12 +507,27 @@ static void pseries_msi_unmask(struct irq_data *d)
    irq_chip_unmask_parent(d);
}

static void pseries_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
{
    struct msi_desc *entry = irq_data_get_msi_desc(data);

    /*
     * Do not update the MSIx vector table. It's not strictly necessary
     * because the table is initialized by the underlying hypervisor, PowerVM
     * or QEMU/KVM. However, if the MSIx vector entry is cleared, any further
     * activation will fail. This can happen in some drivers (eg. IPR) which
     * deactivate an IRQ used for testing MSI support.
     */
    entry->msg = *msg;
}

static struct irq_chip pseries_pci_msi_irq_chip = {
    .name = "pSeries-PCI-MSI",
    .irq_shutdown = pseries_msi_shutdown,
    .irq_mask = pseries_msi_mask,
    .irq_unmask = pseries_msi_unmask,
    .irq_eoi = irq_chip_eoi_parent,
    .irq_write_msi_msg = pseries_msi_write_msg,
};

static struct msi_domain_info pseries_msi_domain_info = {