Merge branch 'fixes' into next

Merge our fixes branch into next.

That lets us resolve a conflict in arch/powerpc/sysdev/xive/common.c
between cbc06f051c ("powerpc/xive: Do not skip CPU-less nodes when
creating the IPIs"), which moved request_irq() out of xive_init_ipis(),
and 17df41fec5 ("powerpc: use IRQF_NO_DEBUG for IPIs"), which added
IRQF_NO_DEBUG to that request_irq() call, which has now moved.
Author: Michael Ellerman
Date:   2021-09-03 22:54:12 +10:00
Commit: a3314262ee

18 changed files with 125 additions and 81 deletions
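
The resolution keeps the new flag on the relocated call. A sketch of the
resolved lines, as they appear in the xive/common.c hunk at the end of
this diff:

        /* request_irq() now lives in the per-CPU xive_request_ipi(),
         * keeping the IRQF_NO_DEBUG flag added by 17df41fec5 */
        ret = request_irq(xid->irq, xive_muxed_ipi_action,
                          IRQF_NO_DEBUG | IRQF_PERCPU | IRQF_NO_THREAD,
                          xid->name, NULL);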

@@ -4,6 +4,8 @@
 #include <asm/bug.h>
 #include <asm/book3s/32/mmu-hash.h>
+#include <asm/mmu.h>
+#include <asm/synch.h>
 
 #ifndef __ASSEMBLY__
@@ -28,6 +30,15 @@ static inline void kuep_lock(void)
                return;
 
        update_user_segments(mfsr(0) | SR_NX);
+       /*
+        * This isync() shouldn't be necessary as the kernel is not expected to
+        * run any instruction in userspace soon after the update of segments,
+        * but hash based cores (at least G3) seem to exhibit a random
+        * behaviour when the 'isync' is not there. 603 cores don't have this
+        * behaviour so don't do the 'isync' as it saves several CPU cycles.
+        */
+       if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
+               isync();        /* Context sync required after mtsr() */
 }
 
 static inline void kuep_unlock(void)
@@ -36,6 +47,15 @@ static inline void kuep_unlock(void)
                return;
 
        update_user_segments(mfsr(0) & ~SR_NX);
+       /*
+        * This isync() shouldn't be necessary as a 'rfi' will soon be executed
+        * to return to userspace, but hash based cores (at least G3) seem to
+        * exhibit a random behaviour when the 'isync' is not there. 603 cores
+        * don't have this behaviour so don't do the 'isync' as it saves several
+        * CPU cycles.
+        */
+       if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
+               isync();        /* Context sync required after mtsr() */
 }
 
 #ifdef CONFIG_PPC_KUAP

@@ -583,6 +583,9 @@ DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);
 DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);
 
+/* irq.c */
+DECLARE_INTERRUPT_HANDLER_ASYNC(do_IRQ);
+
 void __noreturn unrecoverable_exception(struct pt_regs *regs);
 void replay_system_reset(void);

@@ -52,7 +52,7 @@ extern void *mcheckirq_ctx[NR_CPUS];
 extern void *hardirq_ctx[NR_CPUS];
 extern void *softirq_ctx[NR_CPUS];
 
-extern void do_IRQ(struct pt_regs *regs);
+void __do_IRQ(struct pt_regs *regs);
 extern void __init init_IRQ(void);
 extern void __do_irq(struct pt_regs *regs);

@@ -77,6 +77,22 @@ struct pt_regs
                unsigned long __pad[4]; /* Maintain 16 byte interrupt stack alignment */
        };
 #endif
+#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
+       struct {        /* Must be a multiple of 16 bytes */
+               unsigned long mas0;
+               unsigned long mas1;
+               unsigned long mas2;
+               unsigned long mas3;
+               unsigned long mas6;
+               unsigned long mas7;
+               unsigned long srr0;
+               unsigned long srr1;
+               unsigned long csrr0;
+               unsigned long csrr1;
+               unsigned long dsrr0;
+               unsigned long dsrr1;
+       };
+#endif
 };
 #endif
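
This anonymous struct takes over from struct exception_regs, which the
head_booke.h hunk below deletes; holding these registers inside pt_regs is
what lets asm-offsets.c switch the MAS/SRR save-slot constants over to
STACK_PT_REGS_OFFSET() in the next hunk.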

@@ -302,24 +302,21 @@ int main(void)
        STACK_PT_REGS_OFFSET(STACK_REGS_IAMR, iamr);
 #endif
 
-#if defined(CONFIG_PPC32)
-#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
-       DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE);
-       DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
+#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
+       STACK_PT_REGS_OFFSET(MAS0, mas0);
        /* we overload MMUCR for 44x on MAS0 since they are mutually exclusive */
-       DEFINE(MMUCR, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
-       DEFINE(MAS1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas1));
-       DEFINE(MAS2, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas2));
-       DEFINE(MAS3, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas3));
-       DEFINE(MAS6, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas6));
-       DEFINE(MAS7, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas7));
-       DEFINE(_SRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr0));
-       DEFINE(_SRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr1));
-       DEFINE(_CSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr0));
-       DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1));
-       DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0));
-       DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1));
-#endif
+       STACK_PT_REGS_OFFSET(MMUCR, mas0);
+       STACK_PT_REGS_OFFSET(MAS1, mas1);
+       STACK_PT_REGS_OFFSET(MAS2, mas2);
+       STACK_PT_REGS_OFFSET(MAS3, mas3);
+       STACK_PT_REGS_OFFSET(MAS6, mas6);
+       STACK_PT_REGS_OFFSET(MAS7, mas7);
+       STACK_PT_REGS_OFFSET(_SRR0, srr0);
+       STACK_PT_REGS_OFFSET(_SRR1, srr1);
+       STACK_PT_REGS_OFFSET(_CSRR0, csrr0);
+       STACK_PT_REGS_OFFSET(_CSRR1, csrr1);
+       STACK_PT_REGS_OFFSET(_DSRR0, dsrr0);
+       STACK_PT_REGS_OFFSET(_DSRR1, dsrr1);
 #endif
 
        /* About the CPU features table */

@@ -812,7 +812,6 @@ __start_interrupts:
  * syscall register convention is in Documentation/powerpc/syscall64-abi.rst
  */
 EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
-1:
        /* SCV 0 */
        mr      r9,r13
        GET_PACA(r13)
@@ -842,10 +841,12 @@ EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
        b       system_call_vectored_sigill
 #endif
        .endr
-2:
 EXC_VIRT_END(system_call_vectored, 0x3000, 0x1000)
 
-SOFT_MASK_TABLE(1b, 2b) // Treat scv vectors as soft-masked, see comment above.
+// Treat scv vectors as soft-masked, see comment above.
+// Use absolute values rather than labels here, so they don't get relocated,
+// because this code runs unrelocated.
+SOFT_MASK_TABLE(0xc000000000003000, 0xc000000000004000)
 
 #ifdef CONFIG_RELOCATABLE
 TRAMP_VIRT_BEGIN(system_call_vectored_tramp)
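
(The two absolute values follow from the declaration above:
EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000) places the scv vectors
at offsets 0x3000..0x4000 from the kernel virtual base 0xc000000000000000,
so the table covers exactly that window without relocatable labels.)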

@@ -300,7 +300,7 @@ ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
        EXCEPTION_PROLOG_1
        EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataAccess handle_dar_dsisr=1
        prepare_transfer_to_handler
-       lwz     r5, _DSISR(r11)
+       lwz     r5, _DSISR(r1)
        andis.  r0, r5, DSISR_DABRMATCH@h
        bne-    1f
        bl      do_page_fault

@@ -168,20 +168,18 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
 /* only on e500mc */
 #define DBG_STACK_BASE         dbgirq_ctx
 
-#define EXC_LVL_FRAME_OVERHEAD (THREAD_SIZE - INT_FRAME_SIZE - EXC_LVL_SIZE)
-
 #ifdef CONFIG_SMP
 #define BOOKE_LOAD_EXC_LEVEL_STACK(level)      \
        mfspr   r8,SPRN_PIR;                    \
        slwi    r8,r8,2;                        \
        addis   r8,r8,level##_STACK_BASE@ha;    \
        lwz     r8,level##_STACK_BASE@l(r8);    \
-       addi    r8,r8,EXC_LVL_FRAME_OVERHEAD;
+       addi    r8,r8,THREAD_SIZE - INT_FRAME_SIZE;
 #else
 #define BOOKE_LOAD_EXC_LEVEL_STACK(level)      \
        lis     r8,level##_STACK_BASE@ha;       \
        lwz     r8,level##_STACK_BASE@l(r8);    \
-       addi    r8,r8,EXC_LVL_FRAME_OVERHEAD;
+       addi    r8,r8,THREAD_SIZE - INT_FRAME_SIZE;
 #endif
 
 /*
@@ -208,7 +206,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
        mtmsr   r11;                                                    \
        mfspr   r11,SPRN_SPRG_THREAD;   /* if from user, start at top of */\
        lwz     r11, TASK_STACK - THREAD(r11); /* this thread's kernel stack */\
-       addi    r11,r11,EXC_LVL_FRAME_OVERHEAD; /* allocate stack frame */\
+       addi    r11,r11,THREAD_SIZE - INT_FRAME_SIZE;   /* allocate stack frame */\
        beq     1f;                                                     \
        /* COMING FROM USER MODE */                                     \
        stw     r9,_CCR(r11);           /* save CR                      */\
@@ -516,24 +514,5 @@ label:
        bl      kernel_fp_unavailable_exception;                        \
        b       interrupt_return
 
-#else /* __ASSEMBLY__ */
-struct exception_regs {
-       unsigned long mas0;
-       unsigned long mas1;
-       unsigned long mas2;
-       unsigned long mas3;
-       unsigned long mas6;
-       unsigned long mas7;
-       unsigned long srr0;
-       unsigned long srr1;
-       unsigned long csrr0;
-       unsigned long csrr1;
-       unsigned long dsrr0;
-       unsigned long dsrr1;
-};
-
-/* ensure this structure is always sized to a multiple of the stack alignment */
-#define STACK_EXC_LVL_FRAME_SIZE       ALIGN(sizeof (struct exception_regs), 16)
-
 #endif /* __ASSEMBLY__ */
 #endif /* __HEAD_BOOKE_H__ */

@@ -750,7 +750,7 @@ void __do_irq(struct pt_regs *regs)
        trace_irq_exit(regs);
 }
 
-DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
+void __do_IRQ(struct pt_regs *regs)
 {
        struct pt_regs *old_regs = set_irq_regs(regs);
        void *cursp, *irqsp, *sirqsp;
@@ -774,6 +774,11 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
        set_irq_regs(old_regs);
 }
 
+DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
+{
+       __do_IRQ(regs);
+}
+
 static void *__init alloc_vm_stack(void)
 {
        return __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP,
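
This is the same wrapper split used elsewhere in this series: do_IRQ() keeps
the DEFINE_INTERRUPT_HANDLER_ASYNC() entry/exit wrapping, while the plain
__do_IRQ() body can be called from code that is already inside an interrupt
handler (see the time.c hunk below) without re-entering the wrapper; the
traps.c hunk below applies the same pattern to single_step_exception().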

@@ -292,7 +292,8 @@ int kprobe_handler(struct pt_regs *regs)
        if (user_mode(regs))
                return 0;
 
-       if (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR))
+       if (!IS_ENABLED(CONFIG_BOOKE) &&
+           (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR)))
                return 0;
 
        /*
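
(The BookE exclusion is presumably because on those cores the MSR_IR/MSR_DR
bit positions select an address space rather than indicating that translation
is enabled, and they are normally clear while running in the kernel, so the
real-mode check would otherwise reject every kprobe there.)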

@@ -1167,7 +1167,7 @@ static int __init topology_init(void)
                 * CPU. For instance, the boot cpu might never be valid
                 * for hotplugging.
                 */
-               if (smp_ops->cpu_offline_self)
+               if (smp_ops && smp_ops->cpu_offline_self)
                        c->hotpluggable = 1;
 #endif

@@ -585,7 +585,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
 #if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
        if (atomic_read(&ppc_n_lost_interrupts) != 0)
-               do_IRQ(regs);
+               __do_IRQ(regs);
 #endif
 
        old_regs = set_irq_regs(regs);

@@ -1104,7 +1104,7 @@ DEFINE_INTERRUPT_HANDLER(RunModeException)
        _exception(SIGTRAP, regs, TRAP_UNK, 0);
 }
 
-DEFINE_INTERRUPT_HANDLER(single_step_exception)
+static void __single_step_exception(struct pt_regs *regs)
 {
        clear_single_step(regs);
        clear_br_trace(regs);
@@ -1121,6 +1121,11 @@ DEFINE_INTERRUPT_HANDLER(single_step_exception)
        _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
 }
 
+DEFINE_INTERRUPT_HANDLER(single_step_exception)
+{
+       __single_step_exception(regs);
+}
+
 /*
  * After we have successfully emulated an instruction, we have to
  * check if the instruction was being single-stepped, and if so,
@@ -1130,7 +1135,7 @@ DEFINE_INTERRUPT_HANDLER(single_step_exception)
 static void emulate_single_step(struct pt_regs *regs)
 {
        if (single_stepping(regs))
-               single_step_exception(regs);
+               __single_step_exception(regs);
 }
 
 static inline int __parse_fpscr(unsigned long fpscr)

@@ -27,6 +27,13 @@ KASAN_SANITIZE := n
 ccflags-y := -shared -fno-common -fno-builtin -nostdlib \
        -Wl,-soname=linux-vdso64.so.1 -Wl,--hash-style=both
 
+# Go prior to 1.16.x assumes r30 is not clobbered by any VDSO code. That used to be true
+# by accident when the VDSO was hand-written asm code, but may not be now that the VDSO is
+# compiler generated. To avoid breaking Go tell GCC not to use r30. Impact on code
+# generation is minimal, it will just use r29 instead.
+ccflags-y += $(call cc-option, -ffixed-r30)
+
 asflags-y := -D__VDSO64__ -s
 
 targets += vdso64.lds
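
The cc-option wrapper makes the flag conditional on toolchain support:
$(call cc-option, -ffixed-r30) expands to -ffixed-r30 only if the compiler
accepts it, and to nothing otherwise, so older compilers still build the VDSO.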

@@ -18,16 +18,12 @@
 /*
  * Updates the attributes of a page in three steps:
  *
- * 1. invalidate the page table entry
- * 2. flush the TLB
- * 3. install the new entry with the updated attributes
- *
- * Invalidating the pte means there are situations where this will not work
- * when in theory it should.
- * For example:
- * - removing write from page whilst it is being executed
- * - setting a page read-only whilst it is being read by another CPU
+ * 1. take the page_table_lock
+ * 2. install the new entry with the updated attributes
+ * 3. flush the TLB
  *
+ * This sequence is safe against concurrent updates, and also allows updating the
+ * attributes of a page currently being executed or accessed.
  */
 static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
 {
@@ -36,9 +32,7 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
 
        spin_lock(&init_mm.page_table_lock);
 
-       /* invalidate the PTE so it's safe to modify */
-       pte = ptep_get_and_clear(&init_mm, addr, ptep);
-       flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+       pte = ptep_get(ptep);
 
        /* modify the PTE bits as desired, then apply */
        switch (action) {
@@ -59,11 +53,14 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
                break;
        }
 
-       set_pte_at(&init_mm, addr, ptep, pte);
+       pte_update(&init_mm, addr, ptep, ~0UL, pte_val(pte), 0);
 
        /* See ptesync comment in radix__set_pte_at() */
        if (radix_enabled())
                asm volatile("ptesync": : :"memory");
+
+       flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+
        spin_unlock(&init_mm.page_table_lock);
 
        return 0;
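
Pulled out of the hunk above, the new body of change_page_attr() follows the
rewritten comment step for step:

        spin_lock(&init_mm.page_table_lock);
        pte = ptep_get(ptep);                   /* read the live PTE under the lock */
        /* ... modify the PTE bits as desired (the action switch) ... */
        pte_update(&init_mm, addr, ptep, ~0UL, pte_val(pte), 0);
        flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
        spin_unlock(&init_mm.page_table_lock);

Because the PTE is never cleared, a concurrent reader or executor of the page
always sees a valid entry, and the trailing TLB flush makes the new attributes
take effect.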

@@ -98,7 +98,7 @@ config PPC_BOOK3S_64
        select PPC_HAVE_PMU_SUPPORT
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE
        select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION
-       select ARCH_ENABLE_PMD_SPLIT_PTLOCK
+       select ARCH_ENABLE_SPLIT_PMD_PTLOCK
        select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
        select ARCH_SUPPORTS_HUGETLBFS
        select ARCH_SUPPORTS_NUMA_BALANCING

@@ -77,7 +77,7 @@
 #include "../../../../drivers/pci/pci.h"
 
 DEFINE_STATIC_KEY_FALSE(shared_processor);
-EXPORT_SYMBOL_GPL(shared_processor);
+EXPORT_SYMBOL(shared_processor);
 
 int CMO_PrPSP = -1;
 int CMO_SecPSP = -1;
@@ -541,9 +541,10 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
         * H_CPU_BEHAV_FAVOUR_SECURITY_H could be set only if
         * H_CPU_BEHAV_FAVOUR_SECURITY is.
         */
-       if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY))
+       if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)) {
                security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
-       else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H)
+               pseries_security_flavor = 0;
+       } else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H)
                pseries_security_flavor = 1;
        else
                pseries_security_flavor = 2;

@@ -66,6 +66,7 @@ static struct irq_domain *xive_irq_domain;
 static struct xive_ipi_desc {
        unsigned int irq;
        char name[16];
+       atomic_t started;
 } *xive_ipis;
 
 /*
@@ -1107,7 +1108,7 @@ static const struct irq_domain_ops xive_ipi_irq_domain_ops = {
        .alloc = xive_ipi_irq_domain_alloc,
 };
 
-static int __init xive_request_ipi(void)
+static int __init xive_init_ipis(void)
 {
        struct fwnode_handle *fwnode;
        struct irq_domain *ipi_domain;
@@ -1131,10 +1132,6 @@ static int __init xive_request_ipi(void)
                struct xive_ipi_desc *xid = &xive_ipis[node];
                struct xive_ipi_alloc_info info = { node };
 
-               /* Skip nodes without CPUs */
-               if (cpumask_empty(cpumask_of_node(node)))
-                       continue;
-
                /*
                 * Map one IPI interrupt per node for all cpus of that node.
                 * Since the HW interrupt number doesn't have any meaning,
@@ -1146,12 +1143,6 @@ static int __init xive_request_ipi(void)
                xid->irq = ret;
 
                snprintf(xid->name, sizeof(xid->name), "IPI-%d", node);
-
-               ret = request_irq(xid->irq, xive_muxed_ipi_action,
-                                 IRQF_NO_DEBUG | IRQF_PERCPU | IRQF_NO_THREAD,
-                                 xid->name, NULL);
-
-               WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
        }
 
        return ret;
@@ -1166,6 +1157,22 @@ out:
        return ret;
 }
 
+static int xive_request_ipi(unsigned int cpu)
+{
+       struct xive_ipi_desc *xid = &xive_ipis[early_cpu_to_node(cpu)];
+       int ret;
+
+       if (atomic_inc_return(&xid->started) > 1)
+               return 0;
+
+       ret = request_irq(xid->irq, xive_muxed_ipi_action,
+                         IRQF_NO_DEBUG | IRQF_PERCPU | IRQF_NO_THREAD,
+                         xid->name, NULL);
+
+       WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
+       return ret;
+}
+
 static int xive_setup_cpu_ipi(unsigned int cpu)
 {
        unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);
@@ -1180,6 +1187,9 @@ static int xive_setup_cpu_ipi(unsigned int cpu)
        if (xc->hw_ipi != XIVE_BAD_IRQ)
                return 0;
 
+       /* Register the IPI */
+       xive_request_ipi(cpu);
+
        /* Grab an IPI from the backend, this will populate xc->hw_ipi */
        if (xive_ops->get_ipi(cpu, xc))
                return -EIO;
@@ -1219,6 +1229,8 @@ static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
        if (xc->hw_ipi == XIVE_BAD_IRQ)
                return;
 
+       /* TODO: clear IPI mapping */
+
        /* Mask the IPI */
        xive_do_source_set_mask(&xc->ipi_data, true);
 
@@ -1241,7 +1253,7 @@ void __init xive_smp_probe(void)
        smp_ops->cause_ipi = xive_cause_ipi;
 
        /* Register the IPI */
-       xive_request_ipi();
+       xive_init_ipis();
 
        /* Allocate and setup IPI for the boot CPU */
        xive_setup_cpu_ipi(smp_processor_id());