Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

drivers/ptp/Kconfig:
  55c8fca1da ("ptp_pch: Restore dependency on PCI")
  e5f3155267 ("ethernet: fix PTP_1588_CLOCK dependencies")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit f444fea789
Author: Jakub Kicinski <kuba@kernel.org>
Date: 2021-08-19 18:09:18 -07:00
181 changed files with 1574 additions and 1132 deletions


@ -152,47 +152,6 @@ allOf:
maxItems: 1 maxItems: 1
st,drdy-int-pin: false st,drdy-int-pin: false
- if:
properties:
compatible:
enum:
# Two intertial interrupts i.e. accelerometer/gyro interrupts
- st,h3lis331dl-accel
- st,l3g4200d-gyro
- st,l3g4is-gyro
- st,l3gd20-gyro
- st,l3gd20h-gyro
- st,lis2de12
- st,lis2dw12
- st,lis2hh12
- st,lis2dh12-accel
- st,lis331dl-accel
- st,lis331dlh-accel
- st,lis3de
- st,lis3dh-accel
- st,lis3dhh
- st,lis3mdl-magn
- st,lng2dm-accel
- st,lps331ap-press
- st,lsm303agr-accel
- st,lsm303dlh-accel
- st,lsm303dlhc-accel
- st,lsm303dlm-accel
- st,lsm330-accel
- st,lsm330-gyro
- st,lsm330d-accel
- st,lsm330d-gyro
- st,lsm330dl-accel
- st,lsm330dl-gyro
- st,lsm330dlc-accel
- st,lsm330dlc-gyro
- st,lsm9ds0-gyro
- st,lsm9ds1-magn
then:
properties:
interrupts:
maxItems: 2
required: required:
- compatible - compatible
- reg - reg


@@ -17,6 +17,7 @@ Introduction
 busses/index
 i2c-topology
 muxes/i2c-mux-gpio
+i2c-sysfs
 Writing device drivers
 ======================


@@ -25,10 +25,10 @@ On x86:
 - vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock
-- kvm->arch.mmu_lock is an rwlock. kvm->arch.tdp_mmu_pages_lock is
-  taken inside kvm->arch.mmu_lock, and cannot be taken without already
-  holding kvm->arch.mmu_lock (typically with ``read_lock``, otherwise
-  there's no need to take kvm->arch.tdp_mmu_pages_lock at all).
+- kvm->arch.mmu_lock is an rwlock. kvm->arch.tdp_mmu_pages_lock and
+  kvm->arch.mmu_unsync_pages_lock are taken inside kvm->arch.mmu_lock, and
+  cannot be taken without already holding kvm->arch.mmu_lock (typically with
+  ``read_lock`` for the TDP MMU, thus the need for additional spinlocks).
 Everything else is a leaf: no other lock is taken inside the critical
 sections.


@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 14
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Opossums on Parade
 # *DOCUMENTATION*


@@ -94,10 +94,14 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
 kvm->arch.return_nisv_io_abort_to_user = true;
 break;
 case KVM_CAP_ARM_MTE:
-if (!system_supports_mte() || kvm->created_vcpus)
-    return -EINVAL;
-r = 0;
-kvm->arch.mte_enabled = true;
+mutex_lock(&kvm->lock);
+if (!system_supports_mte() || kvm->created_vcpus) {
+    r = -EINVAL;
+} else {
+    r = 0;
+    kvm->arch.mte_enabled = true;
+}
+mutex_unlock(&kvm->lock);
 break;
 default:
 r = -EINVAL;


@@ -193,7 +193,7 @@ static bool range_is_memory(u64 start, u64 end)
 {
 struct kvm_mem_range r1, r2;
-if (!find_mem_range(start, &r1) || !find_mem_range(end, &r2))
+if (!find_mem_range(start, &r1) || !find_mem_range(end - 1, &r2))
 return false;
 if (r1.start != r2.start)
 return false;


@@ -583,6 +583,9 @@ DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);
 DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);
+/* irq.c */
+DECLARE_INTERRUPT_HANDLER_ASYNC(do_IRQ);
 void __noreturn unrecoverable_exception(struct pt_regs *regs);
 void replay_system_reset(void);


@@ -52,7 +52,7 @@ extern void *mcheckirq_ctx[NR_CPUS];
 extern void *hardirq_ctx[NR_CPUS];
 extern void *softirq_ctx[NR_CPUS];
-extern void do_IRQ(struct pt_regs *regs);
+void __do_IRQ(struct pt_regs *regs);
 extern void __init init_IRQ(void);
 extern void __do_irq(struct pt_regs *regs);


@ -70,6 +70,22 @@ struct pt_regs
unsigned long __pad[4]; /* Maintain 16 byte interrupt stack alignment */ unsigned long __pad[4]; /* Maintain 16 byte interrupt stack alignment */
}; };
#endif #endif
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
struct { /* Must be a multiple of 16 bytes */
unsigned long mas0;
unsigned long mas1;
unsigned long mas2;
unsigned long mas3;
unsigned long mas6;
unsigned long mas7;
unsigned long srr0;
unsigned long srr1;
unsigned long csrr0;
unsigned long csrr1;
unsigned long dsrr0;
unsigned long dsrr1;
};
#endif
}; };
#endif #endif


@@ -309,24 +309,21 @@ int main(void)
 STACK_PT_REGS_OFFSET(STACK_REGS_IAMR, iamr);
 #endif
-#if defined(CONFIG_PPC32)
-#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
-DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE);
-DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
+#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
+STACK_PT_REGS_OFFSET(MAS0, mas0);
 /* we overload MMUCR for 44x on MAS0 since they are mutually exclusive */
-DEFINE(MMUCR, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
-DEFINE(MAS1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas1));
-DEFINE(MAS2, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas2));
-DEFINE(MAS3, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas3));
-DEFINE(MAS6, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas6));
-DEFINE(MAS7, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas7));
-DEFINE(_SRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr0));
-DEFINE(_SRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr1));
-DEFINE(_CSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr0));
-DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1));
-DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0));
-DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1));
-#endif
+STACK_PT_REGS_OFFSET(MMUCR, mas0);
+STACK_PT_REGS_OFFSET(MAS1, mas1);
+STACK_PT_REGS_OFFSET(MAS2, mas2);
+STACK_PT_REGS_OFFSET(MAS3, mas3);
+STACK_PT_REGS_OFFSET(MAS6, mas6);
+STACK_PT_REGS_OFFSET(MAS7, mas7);
+STACK_PT_REGS_OFFSET(_SRR0, srr0);
+STACK_PT_REGS_OFFSET(_SRR1, srr1);
+STACK_PT_REGS_OFFSET(_CSRR0, csrr0);
+STACK_PT_REGS_OFFSET(_CSRR1, csrr1);
+STACK_PT_REGS_OFFSET(_DSRR0, dsrr0);
+STACK_PT_REGS_OFFSET(_DSRR1, dsrr1);
 #endif
 /* About the CPU features table */


@@ -300,7 +300,7 @@ ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
 EXCEPTION_PROLOG_1
 EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataAccess handle_dar_dsisr=1
 prepare_transfer_to_handler
-lwz r5, _DSISR(r11)
+lwz r5, _DSISR(r1)
 andis. r0, r5, DSISR_DABRMATCH@h
 bne- 1f
 bl do_page_fault


@ -168,20 +168,18 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
/* only on e500mc */ /* only on e500mc */
#define DBG_STACK_BASE dbgirq_ctx #define DBG_STACK_BASE dbgirq_ctx
#define EXC_LVL_FRAME_OVERHEAD (THREAD_SIZE - INT_FRAME_SIZE - EXC_LVL_SIZE)
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
#define BOOKE_LOAD_EXC_LEVEL_STACK(level) \ #define BOOKE_LOAD_EXC_LEVEL_STACK(level) \
mfspr r8,SPRN_PIR; \ mfspr r8,SPRN_PIR; \
slwi r8,r8,2; \ slwi r8,r8,2; \
addis r8,r8,level##_STACK_BASE@ha; \ addis r8,r8,level##_STACK_BASE@ha; \
lwz r8,level##_STACK_BASE@l(r8); \ lwz r8,level##_STACK_BASE@l(r8); \
-addi r8,r8,EXC_LVL_FRAME_OVERHEAD;
+addi r8,r8,THREAD_SIZE - INT_FRAME_SIZE;
#else #else
#define BOOKE_LOAD_EXC_LEVEL_STACK(level) \ #define BOOKE_LOAD_EXC_LEVEL_STACK(level) \
lis r8,level##_STACK_BASE@ha; \ lis r8,level##_STACK_BASE@ha; \
lwz r8,level##_STACK_BASE@l(r8); \ lwz r8,level##_STACK_BASE@l(r8); \
-addi r8,r8,EXC_LVL_FRAME_OVERHEAD;
+addi r8,r8,THREAD_SIZE - INT_FRAME_SIZE;
#endif #endif
/* /*
@ -208,7 +206,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
mtmsr r11; \ mtmsr r11; \
mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\ mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\
lwz r11, TASK_STACK - THREAD(r11); /* this thread's kernel stack */\ lwz r11, TASK_STACK - THREAD(r11); /* this thread's kernel stack */\
-addi r11,r11,EXC_LVL_FRAME_OVERHEAD; /* allocate stack frame */\
+addi r11,r11,THREAD_SIZE - INT_FRAME_SIZE; /* allocate stack frame */\
beq 1f; \ beq 1f; \
/* COMING FROM USER MODE */ \ /* COMING FROM USER MODE */ \
stw r9,_CCR(r11); /* save CR */\ stw r9,_CCR(r11); /* save CR */\
@ -516,24 +514,5 @@ label:
bl kernel_fp_unavailable_exception; \ bl kernel_fp_unavailable_exception; \
b interrupt_return b interrupt_return
#else /* __ASSEMBLY__ */
struct exception_regs {
unsigned long mas0;
unsigned long mas1;
unsigned long mas2;
unsigned long mas3;
unsigned long mas6;
unsigned long mas7;
unsigned long srr0;
unsigned long srr1;
unsigned long csrr0;
unsigned long csrr1;
unsigned long dsrr0;
unsigned long dsrr1;
};
/* ensure this structure is always sized to a multiple of the stack alignment */
#define STACK_EXC_LVL_FRAME_SIZE ALIGN(sizeof (struct exception_regs), 16)
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#endif /* __HEAD_BOOKE_H__ */ #endif /* __HEAD_BOOKE_H__ */


@ -750,7 +750,7 @@ void __do_irq(struct pt_regs *regs)
trace_irq_exit(regs); trace_irq_exit(regs);
} }
-DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
+void __do_IRQ(struct pt_regs *regs)
{ {
struct pt_regs *old_regs = set_irq_regs(regs); struct pt_regs *old_regs = set_irq_regs(regs);
void *cursp, *irqsp, *sirqsp; void *cursp, *irqsp, *sirqsp;
@ -774,6 +774,11 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
set_irq_regs(old_regs); set_irq_regs(old_regs);
} }
DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
{
__do_IRQ(regs);
}
static void *__init alloc_vm_stack(void) static void *__init alloc_vm_stack(void)
{ {
return __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP, return __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP,


@ -292,7 +292,8 @@ int kprobe_handler(struct pt_regs *regs)
if (user_mode(regs)) if (user_mode(regs))
return 0; return 0;
-if (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR))
+if (!IS_ENABLED(CONFIG_BOOKE) &&
+    (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR)))
return 0; return 0;
/* /*


@ -1167,7 +1167,7 @@ static int __init topology_init(void)
* CPU. For instance, the boot cpu might never be valid * CPU. For instance, the boot cpu might never be valid
* for hotplugging. * for hotplugging.
*/ */
-if (smp_ops->cpu_offline_self)
+if (smp_ops && smp_ops->cpu_offline_self)
c->hotpluggable = 1; c->hotpluggable = 1;
#endif #endif


@ -586,7 +586,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC) #if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
if (atomic_read(&ppc_n_lost_interrupts) != 0) if (atomic_read(&ppc_n_lost_interrupts) != 0)
-do_IRQ(regs);
+__do_IRQ(regs);
#endif #endif
old_regs = set_irq_regs(regs); old_regs = set_irq_regs(regs);


@ -1104,7 +1104,7 @@ DEFINE_INTERRUPT_HANDLER(RunModeException)
_exception(SIGTRAP, regs, TRAP_UNK, 0); _exception(SIGTRAP, regs, TRAP_UNK, 0);
} }
-DEFINE_INTERRUPT_HANDLER(single_step_exception)
+static void __single_step_exception(struct pt_regs *regs)
{ {
clear_single_step(regs); clear_single_step(regs);
clear_br_trace(regs); clear_br_trace(regs);
@ -1121,6 +1121,11 @@ DEFINE_INTERRUPT_HANDLER(single_step_exception)
_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
} }
DEFINE_INTERRUPT_HANDLER(single_step_exception)
{
__single_step_exception(regs);
}
/* /*
* After we have successfully emulated an instruction, we have to * After we have successfully emulated an instruction, we have to
* check if the instruction was being single-stepped, and if so, * check if the instruction was being single-stepped, and if so,
@ -1130,7 +1135,7 @@ DEFINE_INTERRUPT_HANDLER(single_step_exception)
static void emulate_single_step(struct pt_regs *regs) static void emulate_single_step(struct pt_regs *regs)
{ {
if (single_stepping(regs)) if (single_stepping(regs))
-single_step_exception(regs);
+__single_step_exception(regs);
} }
static inline int __parse_fpscr(unsigned long fpscr) static inline int __parse_fpscr(unsigned long fpscr)


@ -539,9 +539,10 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
* H_CPU_BEHAV_FAVOUR_SECURITY_H could be set only if * H_CPU_BEHAV_FAVOUR_SECURITY_H could be set only if
* H_CPU_BEHAV_FAVOUR_SECURITY is. * H_CPU_BEHAV_FAVOUR_SECURITY is.
*/ */
-if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY))
+if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)) {
 security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
-else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H)
+pseries_security_flavor = 0;
+} else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H)
pseries_security_flavor = 1; pseries_security_flavor = 1;
else else
pseries_security_flavor = 2; pseries_security_flavor = 2;


@ -67,6 +67,7 @@ static struct irq_domain *xive_irq_domain;
static struct xive_ipi_desc { static struct xive_ipi_desc {
unsigned int irq; unsigned int irq;
char name[16]; char name[16];
atomic_t started;
} *xive_ipis; } *xive_ipis;
/* /*
@ -1120,7 +1121,7 @@ static const struct irq_domain_ops xive_ipi_irq_domain_ops = {
.alloc = xive_ipi_irq_domain_alloc, .alloc = xive_ipi_irq_domain_alloc,
}; };
-static int __init xive_request_ipi(void)
+static int __init xive_init_ipis(void)
{ {
struct fwnode_handle *fwnode; struct fwnode_handle *fwnode;
struct irq_domain *ipi_domain; struct irq_domain *ipi_domain;
@ -1144,10 +1145,6 @@ static int __init xive_request_ipi(void)
struct xive_ipi_desc *xid = &xive_ipis[node]; struct xive_ipi_desc *xid = &xive_ipis[node];
struct xive_ipi_alloc_info info = { node }; struct xive_ipi_alloc_info info = { node };
/* Skip nodes without CPUs */
if (cpumask_empty(cpumask_of_node(node)))
continue;
/* /*
* Map one IPI interrupt per node for all cpus of that node. * Map one IPI interrupt per node for all cpus of that node.
* Since the HW interrupt number doesn't have any meaning, * Since the HW interrupt number doesn't have any meaning,
@ -1159,11 +1156,6 @@ static int __init xive_request_ipi(void)
xid->irq = ret; xid->irq = ret;
snprintf(xid->name, sizeof(xid->name), "IPI-%d", node); snprintf(xid->name, sizeof(xid->name), "IPI-%d", node);
ret = request_irq(xid->irq, xive_muxed_ipi_action,
IRQF_PERCPU | IRQF_NO_THREAD, xid->name, NULL);
WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
} }
return ret; return ret;
@ -1178,6 +1170,22 @@ out:
return ret; return ret;
} }
static int __init xive_request_ipi(unsigned int cpu)
{
struct xive_ipi_desc *xid = &xive_ipis[early_cpu_to_node(cpu)];
int ret;
if (atomic_inc_return(&xid->started) > 1)
return 0;
ret = request_irq(xid->irq, xive_muxed_ipi_action,
IRQF_PERCPU | IRQF_NO_THREAD,
xid->name, NULL);
WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
return ret;
}
static int xive_setup_cpu_ipi(unsigned int cpu) static int xive_setup_cpu_ipi(unsigned int cpu)
{ {
unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu); unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);
@ -1192,6 +1200,9 @@ static int xive_setup_cpu_ipi(unsigned int cpu)
if (xc->hw_ipi != XIVE_BAD_IRQ) if (xc->hw_ipi != XIVE_BAD_IRQ)
return 0; return 0;
/* Register the IPI */
xive_request_ipi(cpu);
/* Grab an IPI from the backend, this will populate xc->hw_ipi */ /* Grab an IPI from the backend, this will populate xc->hw_ipi */
if (xive_ops->get_ipi(cpu, xc)) if (xive_ops->get_ipi(cpu, xc))
return -EIO; return -EIO;
@ -1231,6 +1242,8 @@ static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
if (xc->hw_ipi == XIVE_BAD_IRQ) if (xc->hw_ipi == XIVE_BAD_IRQ)
return; return;
/* TODO: clear IPI mapping */
/* Mask the IPI */ /* Mask the IPI */
xive_do_source_set_mask(&xc->ipi_data, true); xive_do_source_set_mask(&xc->ipi_data, true);
@ -1253,7 +1266,7 @@ void __init xive_smp_probe(void)
smp_ops->cause_ipi = xive_cause_ipi; smp_ops->cause_ipi = xive_cause_ipi;
/* Register the IPI */ /* Register the IPI */
-xive_request_ipi();
+xive_init_ipis();
/* Allocate and setup IPI for the boot CPU */ /* Allocate and setup IPI for the boot CPU */
xive_setup_cpu_ipi(smp_processor_id()); xive_setup_cpu_ipi(smp_processor_id());


@ -11,7 +11,7 @@ endif
CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,) CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,)
ifdef CONFIG_KEXEC ifdef CONFIG_KEXEC
-AFLAGS_kexec_relocate.o := -mcmodel=medany -mno-relax
+AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax)
endif endif
extra-y += head.o extra-y += head.o


@ -197,7 +197,7 @@ static void __init setup_bootmem(void)
* if end of dram is equal to maximum addressable memory. For 64-bit * if end of dram is equal to maximum addressable memory. For 64-bit
* kernel, this problem can't happen here as the end of the virtual * kernel, this problem can't happen here as the end of the virtual
* address space is occupied by the kernel mapping then this check must * address space is occupied by the kernel mapping then this check must
- * be done in create_kernel_page_table.
+ * be done as soon as the kernel mapping base address is determined.
*/ */
max_mapped_addr = __pa(~(ulong)0); max_mapped_addr = __pa(~(ulong)0);
if (max_mapped_addr == (phys_ram_end - 1)) if (max_mapped_addr == (phys_ram_end - 1))


@ -1038,6 +1038,13 @@ struct kvm_arch {
struct list_head lpage_disallowed_mmu_pages; struct list_head lpage_disallowed_mmu_pages;
struct kvm_page_track_notifier_node mmu_sp_tracker; struct kvm_page_track_notifier_node mmu_sp_tracker;
struct kvm_page_track_notifier_head track_notifier_head; struct kvm_page_track_notifier_head track_notifier_head;
/*
* Protects marking pages unsync during page faults, as TDP MMU page
* faults only take mmu_lock for read. For simplicity, the unsync
* pages lock is always taken when marking pages unsync regardless of
* whether mmu_lock is held for read or write.
*/
spinlock_t mmu_unsync_pages_lock;
struct list_head assigned_dev_head; struct list_head assigned_dev_head;
struct iommu_domain *iommu_domain; struct iommu_domain *iommu_domain;


@ -184,6 +184,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
#define V_IGN_TPR_SHIFT 20 #define V_IGN_TPR_SHIFT 20
#define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT) #define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)
#define V_IRQ_INJECTION_BITS_MASK (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK)
#define V_INTR_MASKING_SHIFT 24 #define V_INTR_MASKING_SHIFT 24
#define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT) #define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)


@ -1986,7 +1986,8 @@ static struct irq_chip ioapic_chip __read_mostly = {
.irq_set_affinity = ioapic_set_affinity, .irq_set_affinity = ioapic_set_affinity,
.irq_retrigger = irq_chip_retrigger_hierarchy, .irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_get_irqchip_state = ioapic_irq_get_chip_state, .irq_get_irqchip_state = ioapic_irq_get_chip_state,
-.flags = IRQCHIP_SKIP_SET_WAKE,
+.flags = IRQCHIP_SKIP_SET_WAKE |
+         IRQCHIP_AFFINITY_PRE_STARTUP,
}; };
static struct irq_chip ioapic_ir_chip __read_mostly = { static struct irq_chip ioapic_ir_chip __read_mostly = {
@ -1999,7 +2000,8 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
.irq_set_affinity = ioapic_set_affinity, .irq_set_affinity = ioapic_set_affinity,
.irq_retrigger = irq_chip_retrigger_hierarchy, .irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_get_irqchip_state = ioapic_irq_get_chip_state, .irq_get_irqchip_state = ioapic_irq_get_chip_state,
-.flags = IRQCHIP_SKIP_SET_WAKE,
+.flags = IRQCHIP_SKIP_SET_WAKE |
+         IRQCHIP_AFFINITY_PRE_STARTUP,
}; };
static inline void init_IO_APIC_traps(void) static inline void init_IO_APIC_traps(void)


@ -58,11 +58,13 @@ msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force)
* The quirk bit is not set in this case. * The quirk bit is not set in this case.
* - The new vector is the same as the old vector * - The new vector is the same as the old vector
* - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up) * - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up)
* - The interrupt is not yet started up
* - The new destination CPU is the same as the old destination CPU * - The new destination CPU is the same as the old destination CPU
*/ */
if (!irqd_msi_nomask_quirk(irqd) || if (!irqd_msi_nomask_quirk(irqd) ||
cfg->vector == old_cfg.vector || cfg->vector == old_cfg.vector ||
old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR || old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR ||
!irqd_is_started(irqd) ||
cfg->dest_apicid == old_cfg.dest_apicid) { cfg->dest_apicid == old_cfg.dest_apicid) {
irq_msi_update_msg(irqd, cfg); irq_msi_update_msg(irqd, cfg);
return ret; return ret;
@ -150,7 +152,8 @@ static struct irq_chip pci_msi_controller = {
.irq_ack = irq_chip_ack_parent, .irq_ack = irq_chip_ack_parent,
.irq_retrigger = irq_chip_retrigger_hierarchy, .irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_set_affinity = msi_set_affinity, .irq_set_affinity = msi_set_affinity,
-.flags = IRQCHIP_SKIP_SET_WAKE,
+.flags = IRQCHIP_SKIP_SET_WAKE |
+         IRQCHIP_AFFINITY_PRE_STARTUP,
}; };
int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec, int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,
@ -219,7 +222,8 @@ static struct irq_chip pci_msi_ir_controller = {
.irq_mask = pci_msi_mask_irq, .irq_mask = pci_msi_mask_irq,
.irq_ack = irq_chip_ack_parent, .irq_ack = irq_chip_ack_parent,
.irq_retrigger = irq_chip_retrigger_hierarchy, .irq_retrigger = irq_chip_retrigger_hierarchy,
-.flags = IRQCHIP_SKIP_SET_WAKE,
+.flags = IRQCHIP_SKIP_SET_WAKE |
+         IRQCHIP_AFFINITY_PRE_STARTUP,
}; };
static struct msi_domain_info pci_msi_ir_domain_info = { static struct msi_domain_info pci_msi_ir_domain_info = {
@ -273,7 +277,8 @@ static struct irq_chip dmar_msi_controller = {
.irq_retrigger = irq_chip_retrigger_hierarchy, .irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_compose_msi_msg = dmar_msi_compose_msg, .irq_compose_msi_msg = dmar_msi_compose_msg,
.irq_write_msi_msg = dmar_msi_write_msg, .irq_write_msi_msg = dmar_msi_write_msg,
-.flags = IRQCHIP_SKIP_SET_WAKE,
+.flags = IRQCHIP_SKIP_SET_WAKE |
+         IRQCHIP_AFFINITY_PRE_STARTUP,
}; };
static int dmar_msi_init(struct irq_domain *domain, static int dmar_msi_init(struct irq_domain *domain,


@ -285,15 +285,14 @@ static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
return chunks >>= shift; return chunks >>= shift;
} }
-static int __mon_event_count(u32 rmid, struct rmid_read *rr)
+static u64 __mon_event_count(u32 rmid, struct rmid_read *rr)
{ {
struct mbm_state *m; struct mbm_state *m;
u64 chunks, tval; u64 chunks, tval;
tval = __rmid_read(rmid, rr->evtid); tval = __rmid_read(rmid, rr->evtid);
if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) { if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) {
-rr->val = tval;
-return -EINVAL;
+return tval;
} }
switch (rr->evtid) { switch (rr->evtid) {
case QOS_L3_OCCUP_EVENT_ID: case QOS_L3_OCCUP_EVENT_ID:
@ -305,12 +304,6 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr)
case QOS_L3_MBM_LOCAL_EVENT_ID: case QOS_L3_MBM_LOCAL_EVENT_ID:
m = &rr->d->mbm_local[rmid]; m = &rr->d->mbm_local[rmid];
break; break;
default:
/*
* Code would never reach here because
* an invalid event id would fail the __rmid_read.
*/
return -EINVAL;
} }
if (rr->first) { if (rr->first) {
@ -361,23 +354,29 @@ void mon_event_count(void *info)
struct rdtgroup *rdtgrp, *entry; struct rdtgroup *rdtgrp, *entry;
struct rmid_read *rr = info; struct rmid_read *rr = info;
struct list_head *head; struct list_head *head;
u64 ret_val;
rdtgrp = rr->rgrp; rdtgrp = rr->rgrp;
-if (__mon_event_count(rdtgrp->mon.rmid, rr))
-return;
+ret_val = __mon_event_count(rdtgrp->mon.rmid, rr);
/* /*
- * For Ctrl groups read data from child monitor groups.
+ * For Ctrl groups read data from child monitor groups and
+ * add them together. Count events which are read successfully.
+ * Discard the rmid_read's reporting errors.
*/ */
head = &rdtgrp->mon.crdtgrp_list; head = &rdtgrp->mon.crdtgrp_list;
if (rdtgrp->type == RDTCTRL_GROUP) { if (rdtgrp->type == RDTCTRL_GROUP) {
list_for_each_entry(entry, head, mon.crdtgrp_list) { list_for_each_entry(entry, head, mon.crdtgrp_list) {
-if (__mon_event_count(entry->mon.rmid, rr))
-return;
+if (__mon_event_count(entry->mon.rmid, rr) == 0)
+ret_val = 0;
} }
} }
/* Report error if none of rmid_reads are successful */
if (ret_val)
rr->val = ret_val;
} }
/* /*


@ -508,7 +508,7 @@ static struct irq_chip hpet_msi_controller __ro_after_init = {
.irq_set_affinity = msi_domain_set_affinity, .irq_set_affinity = msi_domain_set_affinity,
.irq_retrigger = irq_chip_retrigger_hierarchy, .irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_write_msi_msg = hpet_msi_write_msg, .irq_write_msi_msg = hpet_msi_write_msg,
-.flags = IRQCHIP_SKIP_SET_WAKE,
+.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_AFFINITY_PRE_STARTUP,
}; };
static int hpet_msi_init(struct irq_domain *domain, static int hpet_msi_init(struct irq_domain *domain,


@ -208,30 +208,6 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
kvm_mmu_after_set_cpuid(vcpu); kvm_mmu_after_set_cpuid(vcpu);
} }
static int is_efer_nx(void)
{
return host_efer & EFER_NX;
}
static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
int i;
struct kvm_cpuid_entry2 *e, *entry;
entry = NULL;
for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
e = &vcpu->arch.cpuid_entries[i];
if (e->function == 0x80000001) {
entry = e;
break;
}
}
if (entry && cpuid_entry_has(entry, X86_FEATURE_NX) && !is_efer_nx()) {
cpuid_entry_clear(entry, X86_FEATURE_NX);
printk(KERN_INFO "kvm: guest NX capability removed\n");
}
}
int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu) int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{ {
struct kvm_cpuid_entry2 *best; struct kvm_cpuid_entry2 *best;
@ -302,7 +278,6 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
vcpu->arch.cpuid_entries = e2; vcpu->arch.cpuid_entries = e2;
vcpu->arch.cpuid_nent = cpuid->nent; vcpu->arch.cpuid_nent = cpuid->nent;
cpuid_fix_nx_cap(vcpu);
kvm_update_cpuid_runtime(vcpu); kvm_update_cpuid_runtime(vcpu);
kvm_vcpu_after_set_cpuid(vcpu); kvm_vcpu_after_set_cpuid(vcpu);
@ -401,7 +376,6 @@ static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
void kvm_set_cpu_caps(void) void kvm_set_cpu_caps(void)
{ {
unsigned int f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
unsigned int f_gbpages = F(GBPAGES); unsigned int f_gbpages = F(GBPAGES);
unsigned int f_lm = F(LM); unsigned int f_lm = F(LM);
@ -515,7 +489,7 @@ void kvm_set_cpu_caps(void)
F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) | F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
F(MTRR) | F(PGE) | F(MCA) | F(CMOV) | F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
F(PAT) | F(PSE36) | 0 /* Reserved */ | F(PAT) | F(PSE36) | 0 /* Reserved */ |
-f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
+F(NX) | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) | F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) |
0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW) 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW)
); );


@ -1933,7 +1933,7 @@ ret_success:
void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu) void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu)
{ {
struct kvm_cpuid_entry2 *entry; struct kvm_cpuid_entry2 *entry;
-struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+struct kvm_vcpu_hv *hv_vcpu;
entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE, 0); entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE, 0);
if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX) { if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX) {


@ -2535,6 +2535,7 @@ static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync) int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)
{ {
struct kvm_mmu_page *sp; struct kvm_mmu_page *sp;
bool locked = false;
/* /*
* Force write-protection if the page is being tracked. Note, the page * Force write-protection if the page is being tracked. Note, the page
@ -2557,9 +2558,34 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)
if (sp->unsync) if (sp->unsync)
continue; continue;
/*
* TDP MMU page faults require an additional spinlock as they
* run with mmu_lock held for read, not write, and the unsync
* logic is not thread safe. Take the spinklock regardless of
* the MMU type to avoid extra conditionals/parameters, there's
* no meaningful penalty if mmu_lock is held for write.
*/
if (!locked) {
locked = true;
spin_lock(&vcpu->kvm->arch.mmu_unsync_pages_lock);
/*
* Recheck after taking the spinlock, a different vCPU
* may have since marked the page unsync. A false
* positive on the unprotected check above is not
* possible as clearing sp->unsync _must_ hold mmu_lock
* for write, i.e. unsync cannot transition from 0->1
* while this CPU holds mmu_lock for read (or write).
*/
if (READ_ONCE(sp->unsync))
continue;
}
WARN_ON(sp->role.level != PG_LEVEL_4K); WARN_ON(sp->role.level != PG_LEVEL_4K);
kvm_unsync_page(vcpu, sp); kvm_unsync_page(vcpu, sp);
} }
if (locked)
spin_unlock(&vcpu->kvm->arch.mmu_unsync_pages_lock);
/* /*
* We need to ensure that the marking of unsync pages is visible * We need to ensure that the marking of unsync pages is visible
@ -5537,6 +5563,8 @@ void kvm_mmu_init_vm(struct kvm *kvm)
{ {
struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker; struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
if (!kvm_mmu_init_tdp_mmu(kvm)) if (!kvm_mmu_init_tdp_mmu(kvm))
/* /*
* No smp_load/store wrappers needed here as we are in * No smp_load/store wrappers needed here as we are in


@ -43,6 +43,7 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
if (!kvm->arch.tdp_mmu_enabled) if (!kvm->arch.tdp_mmu_enabled)
return; return;
WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots)); WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
/* /*
@ -81,8 +82,6 @@ static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root, void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
bool shared) bool shared)
{ {
gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
kvm_lockdep_assert_mmu_lock_held(kvm, shared); kvm_lockdep_assert_mmu_lock_held(kvm, shared);
if (!refcount_dec_and_test(&root->tdp_mmu_root_count)) if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
@ -94,7 +93,7 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
list_del_rcu(&root->link); list_del_rcu(&root->link);
spin_unlock(&kvm->arch.tdp_mmu_pages_lock); spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
-zap_gfn_range(kvm, root, 0, max_gfn, false, false, shared);
+zap_gfn_range(kvm, root, 0, -1ull, false, false, shared);
call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback); call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
} }
@ -724,13 +723,29 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
gfn_t start, gfn_t end, bool can_yield, bool flush, gfn_t start, gfn_t end, bool can_yield, bool flush,
bool shared) bool shared)
{ {
gfn_t max_gfn_host = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
bool zap_all = (start == 0 && end >= max_gfn_host);
struct tdp_iter iter; struct tdp_iter iter;
/*
* No need to try to step down in the iterator when zapping all SPTEs,
* zapping the top-level non-leaf SPTEs will recurse on their children.
*/
int min_level = zap_all ? root->role.level : PG_LEVEL_4K;
/*
* Bound the walk at host.MAXPHYADDR, guest accesses beyond that will
* hit a #PF(RSVD) and never get to an EPT Violation/Misconfig / #NPF,
* and so KVM will never install a SPTE for such addresses.
*/
end = min(end, max_gfn_host);
kvm_lockdep_assert_mmu_lock_held(kvm, shared); kvm_lockdep_assert_mmu_lock_held(kvm, shared);
rcu_read_lock(); rcu_read_lock();
-tdp_root_for_each_pte(iter, root, start, end) {
+for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
+                           min_level, start, end) {
retry: retry:
if (can_yield && if (can_yield &&
tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) { tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
@ -744,9 +759,10 @@ retry:
/* /*
* If this is a non-last-level SPTE that covers a larger range * If this is a non-last-level SPTE that covers a larger range
* than should be zapped, continue, and zap the mappings at a * than should be zapped, continue, and zap the mappings at a
- * lower level.
+ * lower level, except when zapping all SPTEs.
*/ */
-if ((iter.gfn < start ||
+if (!zap_all &&
+    (iter.gfn < start ||
iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) && iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
!is_last_spte(iter.old_spte, iter.level)) !is_last_spte(iter.old_spte, iter.level))
continue; continue;
@ -794,12 +810,11 @@ bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
void kvm_tdp_mmu_zap_all(struct kvm *kvm) void kvm_tdp_mmu_zap_all(struct kvm *kvm)
{ {
gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
bool flush = false; bool flush = false;
int i; int i;
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
-flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, max_gfn,
+flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, -1ull,
flush, false); flush, false);
if (flush) if (flush)
@ -838,7 +853,6 @@ static struct kvm_mmu_page *next_invalidated_root(struct kvm *kvm,
*/ */
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm) void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
{ {
gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
struct kvm_mmu_page *next_root; struct kvm_mmu_page *next_root;
struct kvm_mmu_page *root; struct kvm_mmu_page *root;
bool flush = false; bool flush = false;
@ -854,8 +868,7 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
rcu_read_unlock(); rcu_read_unlock();
-flush = zap_gfn_range(kvm, root, 0, max_gfn, true, flush,
-                      true);
+flush = zap_gfn_range(kvm, root, 0, -1ull, true, flush, true);
/* /*
* Put the reference acquired in * Put the reference acquired in


@ -158,6 +158,9 @@ void recalc_intercepts(struct vcpu_svm *svm)
/* If SMI is not intercepted, ignore guest SMI intercept as well */ /* If SMI is not intercepted, ignore guest SMI intercept as well */
if (!intercept_smi) if (!intercept_smi)
vmcb_clr_intercept(c, INTERCEPT_SMI); vmcb_clr_intercept(c, INTERCEPT_SMI);
vmcb_set_intercept(c, INTERCEPT_VMLOAD);
vmcb_set_intercept(c, INTERCEPT_VMSAVE);
} }
static void copy_vmcb_control_area(struct vmcb_control_area *dst, static void copy_vmcb_control_area(struct vmcb_control_area *dst,
@ -503,7 +506,11 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
static void nested_vmcb02_prepare_control(struct vcpu_svm *svm) static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
{ {
-const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
+const u32 int_ctl_vmcb01_bits =
+        V_INTR_MASKING_MASK | V_GIF_MASK | V_GIF_ENABLE_MASK;
+const u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;
struct kvm_vcpu *vcpu = &svm->vcpu; struct kvm_vcpu *vcpu = &svm->vcpu;
/* /*
@ -535,8 +542,8 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
vcpu->arch.l1_tsc_offset + svm->nested.ctl.tsc_offset; vcpu->arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;
svm->vmcb->control.int_ctl = svm->vmcb->control.int_ctl =
-(svm->nested.ctl.int_ctl & ~mask) |
-(svm->vmcb01.ptr->control.int_ctl & mask);
+(svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
+(svm->vmcb01.ptr->control.int_ctl & int_ctl_vmcb01_bits);
svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext; svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext;
svm->vmcb->control.int_vector = svm->nested.ctl.int_vector; svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;


@ -1589,17 +1589,18 @@ static void svm_set_vintr(struct vcpu_svm *svm)
static void svm_clear_vintr(struct vcpu_svm *svm) static void svm_clear_vintr(struct vcpu_svm *svm)
{ {
const u32 mask = V_TPR_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK | V_INTR_MASKING_MASK;
svm_clr_intercept(svm, INTERCEPT_VINTR); svm_clr_intercept(svm, INTERCEPT_VINTR);
/* Drop int_ctl fields related to VINTR injection. */ /* Drop int_ctl fields related to VINTR injection. */
-svm->vmcb->control.int_ctl &= mask;
+svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
if (is_guest_mode(&svm->vcpu)) { if (is_guest_mode(&svm->vcpu)) {
-svm->vmcb01.ptr->control.int_ctl &= mask;
+svm->vmcb01.ptr->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) != WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) !=
(svm->nested.ctl.int_ctl & V_TPR_MASK)); (svm->nested.ctl.int_ctl & V_TPR_MASK));
-svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & ~mask;
+svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
+        V_IRQ_INJECTION_BITS_MASK;
} }
vmcb_mark_dirty(svm->vmcb, VMCB_INTR); vmcb_mark_dirty(svm->vmcb, VMCB_INTR);


@ -330,6 +330,31 @@ void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
vcpu_put(vcpu); vcpu_put(vcpu);
} }
#define EPTP_PA_MASK GENMASK_ULL(51, 12)
static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
{
return VALID_PAGE(root_hpa) &&
((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
}
static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp,
gpa_t addr)
{
uint i;
struct kvm_mmu_root_info *cached_root;
WARN_ON_ONCE(!mmu_is_nested(vcpu));
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
cached_root = &vcpu->arch.mmu->prev_roots[i];
if (nested_ept_root_matches(cached_root->hpa, cached_root->pgd,
eptp))
vcpu->arch.mmu->invlpg(vcpu, addr, cached_root->hpa);
}
}
static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu, static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
struct x86_exception *fault) struct x86_exception *fault)
{ {
@ -342,10 +367,22 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
vm_exit_reason = EXIT_REASON_PML_FULL; vm_exit_reason = EXIT_REASON_PML_FULL;
vmx->nested.pml_full = false; vmx->nested.pml_full = false;
exit_qualification &= INTR_INFO_UNBLOCK_NMI; exit_qualification &= INTR_INFO_UNBLOCK_NMI;
-} else if (fault->error_code & PFERR_RSVD_MASK)
-    vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
-else
-    vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
+} else {
+    if (fault->error_code & PFERR_RSVD_MASK)
+        vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
+    else
+        vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
/*
* Although the caller (kvm_inject_emulated_page_fault) would
* have already synced the faulting address in the shadow EPT
* tables for the current EPTP12, we also need to sync it for
* any other cached EPTP02s based on the same EP4TA, since the
* TLB associates mappings to the EP4TA rather than the full EPTP.
*/
nested_ept_invalidate_addr(vcpu, vmcs12->ept_pointer,
fault->address);
}
nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification); nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification);
vmcs12->guest_physical_address = fault->address; vmcs12->guest_physical_address = fault->address;
@ -5325,14 +5362,6 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
return nested_vmx_succeed(vcpu); return nested_vmx_succeed(vcpu);
} }
#define EPTP_PA_MASK GENMASK_ULL(51, 12)
static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
{
return VALID_PAGE(root_hpa) &&
((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
}
/* Emulate the INVEPT instruction */ /* Emulate the INVEPT instruction */
static int handle_invept(struct kvm_vcpu *vcpu) static int handle_invept(struct kvm_vcpu *vcpu)
{ {
@ -5826,7 +5855,8 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
if (is_nmi(intr_info)) if (is_nmi(intr_info))
return true; return true;
else if (is_page_fault(intr_info)) else if (is_page_fault(intr_info))
-return vcpu->arch.apf.host_apf_flags || !enable_ept;
+return vcpu->arch.apf.host_apf_flags ||
+       vmx_need_pf_intercept(vcpu);
else if (is_debug(intr_info) && else if (is_debug(intr_info) &&
vcpu->guest_debug & vcpu->guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))


@ -522,7 +522,7 @@ static inline struct vmcs *alloc_vmcs(bool shadow)
static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx) static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{ {
-return vmx->secondary_exec_control &
+return secondary_exec_controls_get(vmx) &
SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE; SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
} }


@ -10,6 +10,7 @@ BEGIN {
/^GNU objdump/ { /^GNU objdump/ {
verstr = "" verstr = ""
gsub(/\(.*\)/, "");
for (i = 3; i <= NF; i++) for (i = 3; i <= NF; i++)
if (match($(i), "^[0-9]")) { if (match($(i), "^[0-9]")) {
verstr = $(i); verstr = $(i);


@ -9,12 +9,6 @@ config MQ_IOSCHED_DEADLINE
help help
MQ version of the deadline IO scheduler. MQ version of the deadline IO scheduler.
config MQ_IOSCHED_DEADLINE_CGROUP
tristate
default y
depends on MQ_IOSCHED_DEADLINE
depends on BLK_CGROUP
config MQ_IOSCHED_KYBER config MQ_IOSCHED_KYBER
tristate "Kyber I/O scheduler" tristate "Kyber I/O scheduler"
default y default y


@ -22,8 +22,6 @@ obj-$(CONFIG_BLK_CGROUP_IOPRIO) += blk-ioprio.o
obj-$(CONFIG_BLK_CGROUP_IOLATENCY) += blk-iolatency.o obj-$(CONFIG_BLK_CGROUP_IOLATENCY) += blk-iolatency.o
obj-$(CONFIG_BLK_CGROUP_IOCOST) += blk-iocost.o obj-$(CONFIG_BLK_CGROUP_IOCOST) += blk-iocost.o
obj-$(CONFIG_MQ_IOSCHED_DEADLINE) += mq-deadline.o obj-$(CONFIG_MQ_IOSCHED_DEADLINE) += mq-deadline.o
mq-deadline-y += mq-deadline-main.o
mq-deadline-$(CONFIG_MQ_IOSCHED_DEADLINE_CGROUP)+= mq-deadline-cgroup.o
obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o
bfq-y := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o bfq-y := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o
obj-$(CONFIG_IOSCHED_BFQ) += bfq.o obj-$(CONFIG_IOSCHED_BFQ) += bfq.o


@ -3061,19 +3061,19 @@ static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX) if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
return -EINVAL; return -EINVAL;
-spin_lock(&blkcg->lock);
+spin_lock_irq(&blkcg->lock);
iocc->dfl_weight = v * WEIGHT_ONE; iocc->dfl_weight = v * WEIGHT_ONE;
hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
struct ioc_gq *iocg = blkg_to_iocg(blkg); struct ioc_gq *iocg = blkg_to_iocg(blkg);
if (iocg) { if (iocg) {
-spin_lock_irq(&iocg->ioc->lock);
+spin_lock(&iocg->ioc->lock);
ioc_now(iocg->ioc, &now); ioc_now(iocg->ioc, &now);
weight_updated(iocg, &now); weight_updated(iocg, &now);
-spin_unlock_irq(&iocg->ioc->lock);
+spin_unlock(&iocg->ioc->lock);
} }
} }
-spin_unlock(&blkcg->lock);
+spin_unlock_irq(&blkcg->lock);
return nbytes; return nbytes;
} }


@ -2994,10 +2994,12 @@ static void queue_set_hctx_shared(struct request_queue *q, bool shared)
int i; int i;
queue_for_each_hw_ctx(q, hctx, i) { queue_for_each_hw_ctx(q, hctx, i) {
-if (shared)
+if (shared) {
     hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
-else
+} else {
+    blk_mq_tag_idle(hctx);
     hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
+}
} }
} }


@ -1,126 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/blk-cgroup.h>
#include <linux/ioprio.h>
#include "mq-deadline-cgroup.h"
static struct blkcg_policy dd_blkcg_policy;
static struct blkcg_policy_data *dd_cpd_alloc(gfp_t gfp)
{
struct dd_blkcg *pd;
pd = kzalloc(sizeof(*pd), gfp);
if (!pd)
return NULL;
pd->stats = alloc_percpu_gfp(typeof(*pd->stats),
GFP_KERNEL | __GFP_ZERO);
if (!pd->stats) {
kfree(pd);
return NULL;
}
return &pd->cpd;
}
static void dd_cpd_free(struct blkcg_policy_data *cpd)
{
struct dd_blkcg *dd_blkcg = container_of(cpd, typeof(*dd_blkcg), cpd);
free_percpu(dd_blkcg->stats);
kfree(dd_blkcg);
}
static struct dd_blkcg *dd_blkcg_from_pd(struct blkg_policy_data *pd)
{
return container_of(blkcg_to_cpd(pd->blkg->blkcg, &dd_blkcg_policy),
struct dd_blkcg, cpd);
}
/*
* Convert an association between a block cgroup and a request queue into a
* pointer to the mq-deadline information associated with a (blkcg, queue) pair.
*/
struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio)
{
struct blkg_policy_data *pd;
pd = blkg_to_pd(bio->bi_blkg, &dd_blkcg_policy);
if (!pd)
return NULL;
return dd_blkcg_from_pd(pd);
}
static size_t dd_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
{
static const char *const prio_class_name[] = {
[IOPRIO_CLASS_NONE] = "NONE",
[IOPRIO_CLASS_RT] = "RT",
[IOPRIO_CLASS_BE] = "BE",
[IOPRIO_CLASS_IDLE] = "IDLE",
};
struct dd_blkcg *blkcg = dd_blkcg_from_pd(pd);
int res = 0;
u8 prio;
for (prio = 0; prio < ARRAY_SIZE(blkcg->stats->stats); prio++)
res += scnprintf(buf + res, size - res,
" [%s] dispatched=%u inserted=%u merged=%u",
prio_class_name[prio],
ddcg_sum(blkcg, dispatched, prio) +
ddcg_sum(blkcg, merged, prio) -
ddcg_sum(blkcg, completed, prio),
ddcg_sum(blkcg, inserted, prio) -
ddcg_sum(blkcg, completed, prio),
ddcg_sum(blkcg, merged, prio));
return res;
}
static struct blkg_policy_data *dd_pd_alloc(gfp_t gfp, struct request_queue *q,
struct blkcg *blkcg)
{
struct dd_blkg *pd;
pd = kzalloc(sizeof(*pd), gfp);
if (!pd)
return NULL;
return &pd->pd;
}
static void dd_pd_free(struct blkg_policy_data *pd)
{
struct dd_blkg *dd_blkg = container_of(pd, typeof(*dd_blkg), pd);
kfree(dd_blkg);
}
static struct blkcg_policy dd_blkcg_policy = {
.cpd_alloc_fn = dd_cpd_alloc,
.cpd_free_fn = dd_cpd_free,
.pd_alloc_fn = dd_pd_alloc,
.pd_free_fn = dd_pd_free,
.pd_stat_fn = dd_pd_stat,
};
int dd_activate_policy(struct request_queue *q)
{
return blkcg_activate_policy(q, &dd_blkcg_policy);
}
void dd_deactivate_policy(struct request_queue *q)
{
blkcg_deactivate_policy(q, &dd_blkcg_policy);
}
int __init dd_blkcg_init(void)
{
return blkcg_policy_register(&dd_blkcg_policy);
}
void __exit dd_blkcg_exit(void)
{
blkcg_policy_unregister(&dd_blkcg_policy);
}


@ -1,114 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(_MQ_DEADLINE_CGROUP_H_)
#define _MQ_DEADLINE_CGROUP_H_
#include <linux/blk-cgroup.h>
struct request_queue;
/**
* struct io_stats_per_prio - I/O statistics per I/O priority class.
* @inserted: Number of inserted requests.
* @merged: Number of merged requests.
* @dispatched: Number of dispatched requests.
* @completed: Number of I/O completions.
*/
struct io_stats_per_prio {
local_t inserted;
local_t merged;
local_t dispatched;
local_t completed;
};
/* I/O statistics per I/O cgroup per I/O priority class (IOPRIO_CLASS_*). */
struct blkcg_io_stats {
struct io_stats_per_prio stats[4];
};
/**
* struct dd_blkcg - Per cgroup data.
* @cpd: blkcg_policy_data structure.
* @stats: I/O statistics.
*/
struct dd_blkcg {
struct blkcg_policy_data cpd; /* must be the first member */
struct blkcg_io_stats __percpu *stats;
};
/*
* Count one event of type 'event_type' and with I/O priority class
* 'prio_class'.
*/
#define ddcg_count(ddcg, event_type, prio_class) do { \
if (ddcg) { \
struct blkcg_io_stats *io_stats = get_cpu_ptr((ddcg)->stats); \
\
BUILD_BUG_ON(!__same_type((ddcg), struct dd_blkcg *)); \
BUILD_BUG_ON(!__same_type((prio_class), u8)); \
local_inc(&io_stats->stats[(prio_class)].event_type); \
put_cpu_ptr(io_stats); \
} \
} while (0)
/*
* Returns the total number of ddcg_count(ddcg, event_type, prio_class) calls
* across all CPUs. No locking or barriers since it is fine if the returned
* sum is slightly outdated.
*/
#define ddcg_sum(ddcg, event_type, prio) ({ \
unsigned int cpu; \
u32 sum = 0; \
\
BUILD_BUG_ON(!__same_type((ddcg), struct dd_blkcg *)); \
BUILD_BUG_ON(!__same_type((prio), u8)); \
for_each_present_cpu(cpu) \
sum += local_read(&per_cpu_ptr((ddcg)->stats, cpu)-> \
stats[(prio)].event_type); \
sum; \
})
#ifdef CONFIG_BLK_CGROUP
/**
* struct dd_blkg - Per (cgroup, request queue) data.
* @pd: blkg_policy_data structure.
*/
struct dd_blkg {
struct blkg_policy_data pd; /* must be the first member */
};
struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio);
int dd_activate_policy(struct request_queue *q);
void dd_deactivate_policy(struct request_queue *q);
int __init dd_blkcg_init(void);
void __exit dd_blkcg_exit(void);
#else /* CONFIG_BLK_CGROUP */
static inline struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio)
{
return NULL;
}
static inline int dd_activate_policy(struct request_queue *q)
{
return 0;
}
static inline void dd_deactivate_policy(struct request_queue *q)
{
}
static inline int dd_blkcg_init(void)
{
return 0;
}
static inline void dd_blkcg_exit(void)
{
}
#endif /* CONFIG_BLK_CGROUP */
#endif /* _MQ_DEADLINE_CGROUP_H_ */


@ -25,7 +25,6 @@
#include "blk-mq-debugfs.h" #include "blk-mq-debugfs.h"
#include "blk-mq-tag.h" #include "blk-mq-tag.h"
#include "blk-mq-sched.h" #include "blk-mq-sched.h"
#include "mq-deadline-cgroup.h"
/* /*
* See Documentation/block/deadline-iosched.rst * See Documentation/block/deadline-iosched.rst
@ -57,6 +56,14 @@ enum dd_prio {
enum { DD_PRIO_COUNT = 3 }; enum { DD_PRIO_COUNT = 3 };
/* I/O statistics per I/O priority. */
struct io_stats_per_prio {
local_t inserted;
local_t merged;
local_t dispatched;
local_t completed;
};
/* I/O statistics for all I/O priorities (enum dd_prio). */ /* I/O statistics for all I/O priorities (enum dd_prio). */
struct io_stats { struct io_stats {
struct io_stats_per_prio stats[DD_PRIO_COUNT]; struct io_stats_per_prio stats[DD_PRIO_COUNT];
@ -79,9 +86,6 @@ struct deadline_data {
* run time data * run time data
*/ */
/* Request queue that owns this data structure. */
struct request_queue *queue;
struct dd_per_prio per_prio[DD_PRIO_COUNT]; struct dd_per_prio per_prio[DD_PRIO_COUNT];
/* Data direction of latest dispatched request. */ /* Data direction of latest dispatched request. */
@ -234,10 +238,8 @@ static void dd_merged_requests(struct request_queue *q, struct request *req,
struct deadline_data *dd = q->elevator->elevator_data; struct deadline_data *dd = q->elevator->elevator_data;
const u8 ioprio_class = dd_rq_ioclass(next); const u8 ioprio_class = dd_rq_ioclass(next);
const enum dd_prio prio = ioprio_class_to_prio[ioprio_class]; const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
struct dd_blkcg *blkcg = next->elv.priv[0];
dd_count(dd, merged, prio); dd_count(dd, merged, prio);
ddcg_count(blkcg, merged, ioprio_class);
/* /*
* if next expires before rq, assign its expire time to rq * if next expires before rq, assign its expire time to rq
@ -375,7 +377,6 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd,
{ {
struct request *rq, *next_rq; struct request *rq, *next_rq;
enum dd_data_dir data_dir; enum dd_data_dir data_dir;
struct dd_blkcg *blkcg;
enum dd_prio prio; enum dd_prio prio;
u8 ioprio_class; u8 ioprio_class;
@ -474,8 +475,6 @@ done:
ioprio_class = dd_rq_ioclass(rq); ioprio_class = dd_rq_ioclass(rq);
prio = ioprio_class_to_prio[ioprio_class]; prio = ioprio_class_to_prio[ioprio_class];
dd_count(dd, dispatched, prio); dd_count(dd, dispatched, prio);
blkcg = rq->elv.priv[0];
ddcg_count(blkcg, dispatched, ioprio_class);
/* /*
* If the request needs its target zone locked, do it. * If the request needs its target zone locked, do it.
*/ */
@ -569,8 +568,6 @@ static void dd_exit_sched(struct elevator_queue *e)
struct deadline_data *dd = e->elevator_data; struct deadline_data *dd = e->elevator_data;
enum dd_prio prio; enum dd_prio prio;
dd_deactivate_policy(dd->queue);
for (prio = 0; prio <= DD_PRIO_MAX; prio++) { for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
struct dd_per_prio *per_prio = &dd->per_prio[prio]; struct dd_per_prio *per_prio = &dd->per_prio[prio];
@ -584,7 +581,7 @@ static void dd_exit_sched(struct elevator_queue *e)
} }
/* /*
- * Initialize elevator private data (deadline_data) and associate with blkcg.
+ * initialize elevator private data (deadline_data).
*/ */
static int dd_init_sched(struct request_queue *q, struct elevator_type *e) static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
{ {
@ -593,12 +590,6 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
enum dd_prio prio; enum dd_prio prio;
int ret = -ENOMEM; int ret = -ENOMEM;
/*
* Initialization would be very tricky if the queue is not frozen,
* hence the warning statement below.
*/
WARN_ON_ONCE(!percpu_ref_is_zero(&q->q_usage_counter));
eq = elevator_alloc(q, e); eq = elevator_alloc(q, e);
if (!eq) if (!eq)
return ret; return ret;
@ -614,8 +605,6 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
if (!dd->stats) if (!dd->stats)
goto free_dd; goto free_dd;
dd->queue = q;
for (prio = 0; prio <= DD_PRIO_MAX; prio++) { for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
struct dd_per_prio *per_prio = &dd->per_prio[prio]; struct dd_per_prio *per_prio = &dd->per_prio[prio];
@ -635,17 +624,9 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
spin_lock_init(&dd->lock); spin_lock_init(&dd->lock);
spin_lock_init(&dd->zone_lock); spin_lock_init(&dd->zone_lock);
ret = dd_activate_policy(q);
if (ret)
goto free_stats;
ret = 0;
q->elevator = eq; q->elevator = eq;
return 0; return 0;
free_stats:
free_percpu(dd->stats);
free_dd: free_dd:
kfree(dd); kfree(dd);
@ -718,7 +699,6 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio); u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
struct dd_per_prio *per_prio; struct dd_per_prio *per_prio;
enum dd_prio prio; enum dd_prio prio;
struct dd_blkcg *blkcg;
LIST_HEAD(free); LIST_HEAD(free);
lockdep_assert_held(&dd->lock); lockdep_assert_held(&dd->lock);
@ -729,18 +709,8 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
*/ */
blk_req_zone_write_unlock(rq); blk_req_zone_write_unlock(rq);
/*
* If a block cgroup has been associated with the submitter and if an
* I/O priority has been set in the associated block cgroup, use the
* lowest of the cgroup priority and the request priority for the
* request. If no priority has been set in the request, use the cgroup
* priority.
*/
prio = ioprio_class_to_prio[ioprio_class]; prio = ioprio_class_to_prio[ioprio_class];
dd_count(dd, inserted, prio); dd_count(dd, inserted, prio);
blkcg = dd_blkcg_from_bio(rq->bio);
ddcg_count(blkcg, inserted, ioprio_class);
rq->elv.priv[0] = blkcg;
if (blk_mq_sched_try_insert_merge(q, rq, &free)) { if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
blk_mq_free_requests(&free); blk_mq_free_requests(&free);
@ -789,10 +759,12 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
spin_unlock(&dd->lock); spin_unlock(&dd->lock);
} }
-/* Callback from inside blk_mq_rq_ctx_init(). */
+/*
+ * Nothing to do here. This is defined only to ensure that .finish_request
+ * method is called upon request completion.
+ */
static void dd_prepare_request(struct request *rq) static void dd_prepare_request(struct request *rq)
{ {
rq->elv.priv[0] = NULL;
} }
/* /*
@ -815,13 +787,11 @@ static void dd_finish_request(struct request *rq)
{ {
struct request_queue *q = rq->q; struct request_queue *q = rq->q;
struct deadline_data *dd = q->elevator->elevator_data; struct deadline_data *dd = q->elevator->elevator_data;
struct dd_blkcg *blkcg = rq->elv.priv[0];
const u8 ioprio_class = dd_rq_ioclass(rq); const u8 ioprio_class = dd_rq_ioclass(rq);
const enum dd_prio prio = ioprio_class_to_prio[ioprio_class]; const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
struct dd_per_prio *per_prio = &dd->per_prio[prio]; struct dd_per_prio *per_prio = &dd->per_prio[prio];
dd_count(dd, completed, prio); dd_count(dd, completed, prio);
ddcg_count(blkcg, completed, ioprio_class);
if (blk_queue_is_zoned(q)) { if (blk_queue_is_zoned(q)) {
unsigned long flags; unsigned long flags;
@ -1144,26 +1114,11 @@ MODULE_ALIAS("mq-deadline-iosched");
static int __init deadline_init(void) static int __init deadline_init(void)
{ {
int ret; return elv_register(&mq_deadline);
ret = elv_register(&mq_deadline);
if (ret)
goto out;
ret = dd_blkcg_init();
if (ret)
goto unreg;
out:
return ret;
unreg:
elv_unregister(&mq_deadline);
goto out;
} }
static void __exit deadline_exit(void) static void __exit deadline_exit(void)
{ {
dd_blkcg_exit();
elv_unregister(&mq_deadline); elv_unregister(&mq_deadline);
} }


@ -1768,7 +1768,7 @@ config CRYPTO_DRBG_HMAC
bool bool
default y default y
select CRYPTO_HMAC select CRYPTO_HMAC
select CRYPTO_SHA256 select CRYPTO_SHA512
config CRYPTO_DRBG_HASH config CRYPTO_DRBG_HASH
bool "Enable Hash DRBG" bool "Enable Hash DRBG"


@ -3021,6 +3021,9 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
struct nd_mapping_desc *mapping; struct nd_mapping_desc *mapping;
/* range index 0 == unmapped in SPA or invalid-SPA */
if (memdev->range_index == 0 || spa->range_index == 0)
continue;
if (memdev->range_index != spa->range_index) if (memdev->range_index != spa->range_index)
continue; continue;
if (count >= ND_MAX_MAPPINGS) { if (count >= ND_MAX_MAPPINGS) {


@ -2837,6 +2837,7 @@ void device_initialize(struct device *dev)
device_pm_init(dev); device_pm_init(dev);
set_dev_node(dev, -1); set_dev_node(dev, -1);
#ifdef CONFIG_GENERIC_MSI_IRQ #ifdef CONFIG_GENERIC_MSI_IRQ
raw_spin_lock_init(&dev->msi_lock);
INIT_LIST_HEAD(&dev->msi_list); INIT_LIST_HEAD(&dev->msi_list);
#endif #endif
INIT_LIST_HEAD(&dev->links.consumers); INIT_LIST_HEAD(&dev->links.consumers);


@ -818,6 +818,10 @@ static bool nbd_clear_req(struct request *req, void *data, bool reserved)
{ {
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req); struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
/* don't abort one completed request */
if (blk_mq_request_completed(req))
return true;
mutex_lock(&cmd->lock); mutex_lock(&cmd->lock);
cmd->status = BLK_STS_IOERR; cmd->status = BLK_STS_IOERR;
mutex_unlock(&cmd->lock); mutex_unlock(&cmd->lock);
@ -2004,15 +2008,19 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
{ {
mutex_lock(&nbd->config_lock); mutex_lock(&nbd->config_lock);
nbd_disconnect(nbd); nbd_disconnect(nbd);
nbd_clear_sock(nbd); sock_shutdown(nbd);
mutex_unlock(&nbd->config_lock);
/* /*
* Make sure recv thread has finished, so it does not drop the last * Make sure recv thread has finished, so it does not drop the last
* config ref and try to destroy the workqueue from inside the work * config ref and try to destroy the workqueue from inside the work
* queue. * queue. And this also ensure that we can safely call nbd_clear_que()
* to cancel the inflight I/Os.
*/ */
if (nbd->recv_workq) if (nbd->recv_workq)
flush_workqueue(nbd->recv_workq); flush_workqueue(nbd->recv_workq);
nbd_clear_que(nbd);
nbd->task_setup = NULL;
mutex_unlock(&nbd->config_lock);
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF, if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
&nbd->config->runtime_flags)) &nbd->config->runtime_flags))
nbd_config_put(nbd); nbd_config_put(nbd);


@ -692,6 +692,28 @@ static const struct blk_mq_ops virtio_mq_ops = {
static unsigned int virtblk_queue_depth; static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444); module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
static int virtblk_validate(struct virtio_device *vdev)
{
u32 blk_size;
if (!vdev->config->get) {
dev_err(&vdev->dev, "%s failure: config access disabled\n",
__func__);
return -EINVAL;
}
if (!virtio_has_feature(vdev, VIRTIO_BLK_F_BLK_SIZE))
return 0;
blk_size = virtio_cread32(vdev,
offsetof(struct virtio_blk_config, blk_size));
if (blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE)
__virtio_clear_bit(vdev, VIRTIO_BLK_F_BLK_SIZE);
return 0;
}
static int virtblk_probe(struct virtio_device *vdev) static int virtblk_probe(struct virtio_device *vdev)
{ {
struct virtio_blk *vblk; struct virtio_blk *vblk;
@ -703,12 +725,6 @@ static int virtblk_probe(struct virtio_device *vdev)
u8 physical_block_exp, alignment_offset; u8 physical_block_exp, alignment_offset;
unsigned int queue_depth; unsigned int queue_depth;
if (!vdev->config->get) {
dev_err(&vdev->dev, "%s failure: config access disabled\n",
__func__);
return -EINVAL;
}
err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS), err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
GFP_KERNEL); GFP_KERNEL);
if (err < 0) if (err < 0)
@ -823,6 +839,14 @@ static int virtblk_probe(struct virtio_device *vdev)
else else
blk_size = queue_logical_block_size(q); blk_size = queue_logical_block_size(q);
if (unlikely(blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE)) {
dev_err(&vdev->dev,
"block size is changed unexpectedly, now is %u\n",
blk_size);
err = -EINVAL;
goto err_cleanup_disk;
}
/* Use topology information if available */ /* Use topology information if available */
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
struct virtio_blk_config, physical_block_exp, struct virtio_blk_config, physical_block_exp,
@ -881,6 +905,8 @@ static int virtblk_probe(struct virtio_device *vdev)
device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups); device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
return 0; return 0;
err_cleanup_disk:
blk_cleanup_disk(vblk->disk);
out_free_tags: out_free_tags:
blk_mq_free_tag_set(&vblk->tag_set); blk_mq_free_tag_set(&vblk->tag_set);
out_free_vq: out_free_vq:
@ -983,6 +1009,7 @@ static struct virtio_driver virtio_blk = {
.driver.name = KBUILD_MODNAME, .driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE, .driver.owner = THIS_MODULE,
.id_table = id_table, .id_table = id_table,
.validate = virtblk_validate,
.probe = virtblk_probe, .probe = virtblk_probe,
.remove = virtblk_remove, .remove = virtblk_remove,
.config_changed = virtblk_config_changed, .config_changed = virtblk_config_changed,


@ -313,7 +313,7 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
return -ENXIO; return -ENXIO;
if (nr_pages < 0) if (nr_pages < 0)
return nr_pages; return -EINVAL;
avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages, avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
kaddr, pfn); kaddr, pfn);


@ -35,15 +35,48 @@ efi_status_t check_platform_features(void)
} }
/* /*
* Although relocatable kernels can fix up the misalignment with respect to * Distro versions of GRUB may ignore the BSS allocation entirely (i.e., fail
* MIN_KIMG_ALIGN, the resulting virtual text addresses are subtly out of * to provide space, and fail to zero it). Check for this condition by double
* sync with those recorded in the vmlinux when kaslr is disabled but the * checking that the first and the last byte of the image are covered by the
* image required relocation anyway. Therefore retain 2M alignment unless * same EFI memory map entry.
* KASLR is in use.
*/ */
static u64 min_kimg_align(void) static bool check_image_region(u64 base, u64 size)
{ {
return efi_nokaslr ? MIN_KIMG_ALIGN : EFI_KIMG_ALIGN; unsigned long map_size, desc_size, buff_size;
efi_memory_desc_t *memory_map;
struct efi_boot_memmap map;
efi_status_t status;
bool ret = false;
int map_offset;
map.map = &memory_map;
map.map_size = &map_size;
map.desc_size = &desc_size;
map.desc_ver = NULL;
map.key_ptr = NULL;
map.buff_size = &buff_size;
status = efi_get_memory_map(&map);
if (status != EFI_SUCCESS)
return false;
for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
efi_memory_desc_t *md = (void *)memory_map + map_offset;
u64 end = md->phys_addr + md->num_pages * EFI_PAGE_SIZE;
/*
* Find the region that covers base, and return whether
* it covers base+size bytes.
*/
if (base >= md->phys_addr && base < end) {
ret = (base + size) <= end;
break;
}
}
efi_bs_call(free_pool, memory_map);
return ret;
} }
efi_status_t handle_kernel_image(unsigned long *image_addr, efi_status_t handle_kernel_image(unsigned long *image_addr,
@ -56,6 +89,16 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long kernel_size, kernel_memsize = 0; unsigned long kernel_size, kernel_memsize = 0;
u32 phys_seed = 0; u32 phys_seed = 0;
/*
* Although relocatable kernels can fix up the misalignment with
* respect to MIN_KIMG_ALIGN, the resulting virtual text addresses are
* subtly out of sync with those recorded in the vmlinux when kaslr is
* disabled but the image required relocation anyway. Therefore retain
* 2M alignment if KASLR was explicitly disabled, even if it was not
* going to be activated to begin with.
*/
u64 min_kimg_align = efi_nokaslr ? MIN_KIMG_ALIGN : EFI_KIMG_ALIGN;
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
if (!efi_nokaslr) { if (!efi_nokaslr) {
status = efi_get_random_bytes(sizeof(phys_seed), status = efi_get_random_bytes(sizeof(phys_seed),
@ -76,6 +119,10 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
if (image->image_base != _text) if (image->image_base != _text)
efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n"); efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");
if (!IS_ALIGNED((u64)_text, EFI_KIMG_ALIGN))
efi_err("FIRMWARE BUG: kernel image not aligned on %ldk boundary\n",
EFI_KIMG_ALIGN >> 10);
kernel_size = _edata - _text; kernel_size = _edata - _text;
kernel_memsize = kernel_size + (_end - _edata); kernel_memsize = kernel_size + (_end - _edata);
*reserve_size = kernel_memsize; *reserve_size = kernel_memsize;
@ -85,14 +132,18 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
* If KASLR is enabled, and we have some randomness available, * If KASLR is enabled, and we have some randomness available,
* locate the kernel at a randomized offset in physical memory. * locate the kernel at a randomized offset in physical memory.
*/ */
status = efi_random_alloc(*reserve_size, min_kimg_align(), status = efi_random_alloc(*reserve_size, min_kimg_align,
reserve_addr, phys_seed); reserve_addr, phys_seed);
if (status != EFI_SUCCESS)
efi_warn("efi_random_alloc() failed: 0x%lx\n", status);
} else { } else {
status = EFI_OUT_OF_RESOURCES; status = EFI_OUT_OF_RESOURCES;
} }
if (status != EFI_SUCCESS) { if (status != EFI_SUCCESS) {
if (IS_ALIGNED((u64)_text, min_kimg_align())) { if (!check_image_region((u64)_text, kernel_memsize)) {
efi_err("FIRMWARE BUG: Image BSS overlaps adjacent EFI memory region\n");
} else if (IS_ALIGNED((u64)_text, min_kimg_align)) {
/* /*
* Just execute from wherever we were loaded by the * Just execute from wherever we were loaded by the
* UEFI PE/COFF loader if the alignment is suitable. * UEFI PE/COFF loader if the alignment is suitable.
@ -103,7 +154,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
} }
status = efi_allocate_pages_aligned(*reserve_size, reserve_addr, status = efi_allocate_pages_aligned(*reserve_size, reserve_addr,
ULONG_MAX, min_kimg_align()); ULONG_MAX, min_kimg_align);
if (status != EFI_SUCCESS) { if (status != EFI_SUCCESS) {
efi_err("Failed to relocate kernel\n"); efi_err("Failed to relocate kernel\n");


@ -30,6 +30,8 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1, region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1,
(u64)ULONG_MAX); (u64)ULONG_MAX);
if (region_end < size)
return 0;
first_slot = round_up(md->phys_addr, align); first_slot = round_up(md->phys_addr, align);
last_slot = round_down(region_end - size + 1, align); last_slot = round_down(region_end - size + 1, align);


@ -1224,14 +1224,14 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
disable_irq(iproc_i2c->irq); disable_irq(iproc_i2c->irq);
tasklet_kill(&iproc_i2c->slave_rx_tasklet);
/* disable all slave interrupts */ /* disable all slave interrupts */
tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET); tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
tmp &= ~(IE_S_ALL_INTERRUPT_MASK << tmp &= ~(IE_S_ALL_INTERRUPT_MASK <<
IE_S_ALL_INTERRUPT_SHIFT); IE_S_ALL_INTERRUPT_SHIFT);
iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, tmp); iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, tmp);
tasklet_kill(&iproc_i2c->slave_rx_tasklet);
/* Erase the slave address programmed */ /* Erase the slave address programmed */
tmp = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET); tmp = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET);
tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT); tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT);


@ -141,7 +141,7 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count,
if (count > 8192) if (count > 8192)
count = 8192; count = 8192;
tmp = kmalloc(count, GFP_KERNEL); tmp = kzalloc(count, GFP_KERNEL);
if (tmp == NULL) if (tmp == NULL)
return -ENOMEM; return -ENOMEM;
@ -150,7 +150,8 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count,
ret = i2c_master_recv(client, tmp, count); ret = i2c_master_recv(client, tmp, count);
if (ret >= 0) if (ret >= 0)
ret = copy_to_user(buf, tmp, count) ? -EFAULT : ret; if (copy_to_user(buf, tmp, ret))
ret = -EFAULT;
kfree(tmp); kfree(tmp);
return ret; return ret;
} }


@ -231,6 +231,7 @@ config DMARD10
config FXLS8962AF config FXLS8962AF
tristate tristate
depends on I2C || !I2C # cannot be built-in for modular I2C
config FXLS8962AF_I2C config FXLS8962AF_I2C
tristate "NXP FXLS8962AF/FXLS8964AF Accelerometer I2C Driver" tristate "NXP FXLS8962AF/FXLS8964AF Accelerometer I2C Driver"
@ -247,6 +248,7 @@ config FXLS8962AF_I2C
config FXLS8962AF_SPI config FXLS8962AF_SPI
tristate "NXP FXLS8962AF/FXLS8964AF Accelerometer SPI Driver" tristate "NXP FXLS8962AF/FXLS8964AF Accelerometer SPI Driver"
depends on SPI depends on SPI
depends on I2C || !I2C
select FXLS8962AF select FXLS8962AF
select REGMAP_SPI select REGMAP_SPI
help help


@ -637,7 +637,7 @@ static int fxls8962af_i2c_raw_read_errata3(struct fxls8962af_data *data,
return ret; return ret;
} }
return ret; return 0;
} }
static int fxls8962af_fifo_transfer(struct fxls8962af_data *data, static int fxls8962af_fifo_transfer(struct fxls8962af_data *data,


@ -664,8 +664,8 @@ static int palmas_adc_wakeup_configure(struct palmas_gpadc *adc)
adc_period = adc->auto_conversion_period; adc_period = adc->auto_conversion_period;
for (i = 0; i < 16; ++i) { for (i = 0; i < 16; ++i) {
if (((1000 * (1 << i)) / 32) < adc_period) if (((1000 * (1 << i)) / 32) >= adc_period)
continue; break;
} }
if (i > 0) if (i > 0)
i--; i--;


@ -568,7 +568,6 @@ static int ti_ads7950_probe(struct spi_device *spi)
st->ring_xfer.tx_buf = &st->tx_buf[0]; st->ring_xfer.tx_buf = &st->tx_buf[0];
st->ring_xfer.rx_buf = &st->rx_buf[0]; st->ring_xfer.rx_buf = &st->rx_buf[0];
/* len will be set later */ /* len will be set later */
st->ring_xfer.cs_change = true;
spi_message_add_tail(&st->ring_xfer, &st->ring_msg); spi_message_add_tail(&st->ring_xfer, &st->ring_msg);


@ -25,6 +25,8 @@
#include <linux/iio/trigger_consumer.h> #include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h> #include <linux/iio/triggered_buffer.h>
#include <linux/time.h>
#define HDC100X_REG_TEMP 0x00 #define HDC100X_REG_TEMP 0x00
#define HDC100X_REG_HUMIDITY 0x01 #define HDC100X_REG_HUMIDITY 0x01
@ -166,7 +168,7 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
struct iio_chan_spec const *chan) struct iio_chan_spec const *chan)
{ {
struct i2c_client *client = data->client; struct i2c_client *client = data->client;
int delay = data->adc_int_us[chan->address]; int delay = data->adc_int_us[chan->address] + 1*USEC_PER_MSEC;
int ret; int ret;
__be16 val; __be16 val;
@ -316,7 +318,7 @@ static irqreturn_t hdc100x_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev; struct iio_dev *indio_dev = pf->indio_dev;
struct hdc100x_data *data = iio_priv(indio_dev); struct hdc100x_data *data = iio_priv(indio_dev);
struct i2c_client *client = data->client; struct i2c_client *client = data->client;
int delay = data->adc_int_us[0] + data->adc_int_us[1]; int delay = data->adc_int_us[0] + data->adc_int_us[1] + 2*USEC_PER_MSEC;
int ret; int ret;
/* dual read starts at temp register */ /* dual read starts at temp register */


@ -411,12 +411,11 @@ int __adis_initial_startup(struct adis *adis)
int ret; int ret;
/* check if the device has rst pin low */ /* check if the device has rst pin low */
gpio = devm_gpiod_get_optional(&adis->spi->dev, "reset", GPIOD_ASIS); gpio = devm_gpiod_get_optional(&adis->spi->dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(gpio)) if (IS_ERR(gpio))
return PTR_ERR(gpio); return PTR_ERR(gpio);
if (gpio) { if (gpio) {
gpiod_set_value_cansleep(gpio, 1);
msleep(10); msleep(10);
/* bring device out of reset */ /* bring device out of reset */
gpiod_set_value_cansleep(gpio, 0); gpiod_set_value_cansleep(gpio, 0);


@ -119,7 +119,7 @@ static int cfi_use_status_reg(struct cfi_private *cfi)
struct cfi_pri_amdstd *extp = cfi->cmdset_priv; struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ; u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;
return extp->MinorVersion >= '5' && return extp && extp->MinorVersion >= '5' &&
(extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG; (extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
} }


@ -229,7 +229,7 @@ static int mchp48l640_write(struct mtd_info *mtd, loff_t to, size_t len,
woff += ws; woff += ws;
} }
return ret; return 0;
} }
static int mchp48l640_read_page(struct mtd_info *mtd, loff_t from, size_t len, static int mchp48l640_read_page(struct mtd_info *mtd, loff_t from, size_t len,
@ -255,6 +255,7 @@ static int mchp48l640_read_page(struct mtd_info *mtd, loff_t from, size_t len,
if (!ret) if (!ret)
*retlen += len; *retlen += len;
kfree(cmd);
return ret; return ret;
fail: fail:
@ -286,7 +287,7 @@ static int mchp48l640_read(struct mtd_info *mtd, loff_t from, size_t len,
woff += ws; woff += ws;
} }
return ret; return 0;
}; };
static const struct mchp48_caps mchp48l640_caps = { static const struct mchp48_caps mchp48l640_caps = {


@ -419,6 +419,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
if (tr->discard) { if (tr->discard) {
blk_queue_flag_set(QUEUE_FLAG_DISCARD, new->rq); blk_queue_flag_set(QUEUE_FLAG_DISCARD, new->rq);
blk_queue_max_discard_sectors(new->rq, UINT_MAX); blk_queue_max_discard_sectors(new->rq, UINT_MAX);
new->rq->limits.discard_granularity = tr->blksize;
} }
gd->queue = new->rq; gd->queue = new->rq;
@ -525,14 +526,10 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
if (!blktrans_notifier.list.next) if (!blktrans_notifier.list.next)
register_mtd_user(&blktrans_notifier); register_mtd_user(&blktrans_notifier);
mutex_lock(&mtd_table_mutex);
ret = register_blkdev(tr->major, tr->name); ret = register_blkdev(tr->major, tr->name);
if (ret < 0) { if (ret < 0) {
printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n", printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
tr->name, tr->major, ret); tr->name, tr->major, ret);
mutex_unlock(&mtd_table_mutex);
return ret; return ret;
} }
@ -542,12 +539,12 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
tr->blkshift = ffs(tr->blksize) - 1; tr->blkshift = ffs(tr->blksize) - 1;
INIT_LIST_HEAD(&tr->devs); INIT_LIST_HEAD(&tr->devs);
list_add(&tr->list, &blktrans_majors);
mutex_lock(&mtd_table_mutex);
list_add(&tr->list, &blktrans_majors);
mtd_for_each_device(mtd) mtd_for_each_device(mtd)
if (mtd->type != MTD_ABSENT) if (mtd->type != MTD_ABSENT)
tr->add_mtd(tr, mtd); tr->add_mtd(tr, mtd);
mutex_unlock(&mtd_table_mutex); mutex_unlock(&mtd_table_mutex);
return 0; return 0;
} }
@ -564,8 +561,8 @@ int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
list_for_each_entry_safe(dev, next, &tr->devs, list) list_for_each_entry_safe(dev, next, &tr->devs, list)
tr->remove_dev(dev); tr->remove_dev(dev);
unregister_blkdev(tr->major, tr->name);
mutex_unlock(&mtd_table_mutex); mutex_unlock(&mtd_table_mutex);
unregister_blkdev(tr->major, tr->name);
BUG_ON(!list_empty(&tr->devs)); BUG_ON(!list_empty(&tr->devs));
return 0; return 0;


@ -806,7 +806,9 @@ static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
err: err:
kfree(info); kfree(info);
return ret;
/* ENODATA means there is no OTP region. */
return ret == -ENODATA ? 0 : ret;
} }
static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd, static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,


@ -5228,12 +5228,18 @@ static bool of_get_nand_on_flash_bbt(struct device_node *np)
static int of_get_nand_secure_regions(struct nand_chip *chip) static int of_get_nand_secure_regions(struct nand_chip *chip)
{ {
struct device_node *dn = nand_get_flash_node(chip); struct device_node *dn = nand_get_flash_node(chip);
struct property *prop;
int nr_elem, i, j; int nr_elem, i, j;
nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64)); /* Only proceed if the "secure-regions" property is present in DT */
if (!nr_elem) prop = of_find_property(dn, "secure-regions", NULL);
if (!prop)
return 0; return 0;
nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64));
if (nr_elem <= 0)
return nr_elem;
chip->nr_secure_regions = nr_elem / 2; chip->nr_secure_regions = nr_elem / 2;
chip->secure_regions = kcalloc(chip->nr_secure_regions, sizeof(*chip->secure_regions), chip->secure_regions = kcalloc(chip->nr_secure_regions, sizeof(*chip->secure_regions),
GFP_KERNEL); GFP_KERNEL);


@ -284,8 +284,7 @@ static int sja1105_mdiobus_base_tx_register(struct sja1105_private *priv,
struct mii_bus *bus; struct mii_bus *bus;
int rc = 0; int rc = 0;
np = of_find_compatible_node(mdio_node, NULL, np = of_get_compatible_child(mdio_node, "nxp,sja1110-base-tx-mdio");
"nxp,sja1110-base-tx-mdio");
if (!np) if (!np)
return 0; return 0;
@ -339,8 +338,7 @@ static int sja1105_mdiobus_base_t1_register(struct sja1105_private *priv,
struct mii_bus *bus; struct mii_bus *bus;
int rc = 0; int rc = 0;
np = of_find_compatible_node(mdio_node, NULL, np = of_get_compatible_child(mdio_node, "nxp,sja1110-base-t1-mdio");
"nxp,sja1110-base-t1-mdio");
if (!np) if (!np)
return 0; return 0;


@ -72,7 +72,8 @@
#include "bnxt_debugfs.h" #include "bnxt_debugfs.h"
#define BNXT_TX_TIMEOUT (5 * HZ) #define BNXT_TX_TIMEOUT (5 * HZ)
#define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW) #define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
NETIF_MSG_TX_ERR)
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver"); MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
@ -367,6 +368,33 @@ static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
return md_dst->u.port_info.port_id; return md_dst->u.port_info.port_id;
} }
static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
u16 prod)
{
bnxt_db_write(bp, &txr->tx_db, prod);
txr->kick_pending = 0;
}
static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
struct bnxt_tx_ring_info *txr,
struct netdev_queue *txq)
{
netif_tx_stop_queue(txq);
/* netif_tx_stop_queue() must be done before checking
* tx index in bnxt_tx_avail() below, because in
* bnxt_tx_int(), we update tx index before checking for
* netif_tx_queue_stopped().
*/
smp_mb();
if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
netif_tx_wake_queue(txq);
return false;
}
return true;
}
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{ {
struct bnxt *bp = netdev_priv(dev); struct bnxt *bp = netdev_priv(dev);
@ -386,6 +414,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
i = skb_get_queue_mapping(skb); i = skb_get_queue_mapping(skb);
if (unlikely(i >= bp->tx_nr_rings)) { if (unlikely(i >= bp->tx_nr_rings)) {
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
atomic_long_inc(&dev->tx_dropped);
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
@ -395,8 +424,12 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
free_size = bnxt_tx_avail(bp, txr); free_size = bnxt_tx_avail(bp, txr);
if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) { if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
netif_tx_stop_queue(txq); /* We must have raced with NAPI cleanup */
return NETDEV_TX_BUSY; if (net_ratelimit() && txr->kick_pending)
netif_warn(bp, tx_err, dev,
"bnxt: ring busy w/ flush pending!\n");
if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
return NETDEV_TX_BUSY;
} }
length = skb->len; length = skb->len;
@ -519,21 +552,16 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
normal_tx: normal_tx:
if (length < BNXT_MIN_PKT_SIZE) { if (length < BNXT_MIN_PKT_SIZE) {
pad = BNXT_MIN_PKT_SIZE - length; pad = BNXT_MIN_PKT_SIZE - length;
if (skb_pad(skb, pad)) { if (skb_pad(skb, pad))
/* SKB already freed. */ /* SKB already freed. */
tx_buf->skb = NULL; goto tx_kick_pending;
return NETDEV_TX_OK;
}
length = BNXT_MIN_PKT_SIZE; length = BNXT_MIN_PKT_SIZE;
} }
mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE); mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&pdev->dev, mapping))) { if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
dev_kfree_skb_any(skb); goto tx_free;
tx_buf->skb = NULL;
return NETDEV_TX_OK;
}
dma_unmap_addr_set(tx_buf, mapping, mapping); dma_unmap_addr_set(tx_buf, mapping, mapping);
flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD | flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
@ -620,24 +648,17 @@ normal_tx:
txr->tx_prod = prod; txr->tx_prod = prod;
if (!netdev_xmit_more() || netif_xmit_stopped(txq)) if (!netdev_xmit_more() || netif_xmit_stopped(txq))
bnxt_db_write(bp, &txr->tx_db, prod); bnxt_txr_db_kick(bp, txr, prod);
else
txr->kick_pending = 1;
tx_done: tx_done:
if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
if (netdev_xmit_more() && !tx_buf->is_push) if (netdev_xmit_more() && !tx_buf->is_push)
bnxt_db_write(bp, &txr->tx_db, prod); bnxt_txr_db_kick(bp, txr, prod);
netif_tx_stop_queue(txq); bnxt_txr_netif_try_stop_queue(bp, txr, txq);
/* netif_tx_stop_queue() must be done before checking
* tx index in bnxt_tx_avail() below, because in
* bnxt_tx_int(), we update tx index before checking for
* netif_tx_queue_stopped().
*/
smp_mb();
if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
netif_tx_wake_queue(txq);
} }
return NETDEV_TX_OK; return NETDEV_TX_OK;
@ -650,7 +671,6 @@ tx_dma_error:
/* start back at beginning and unmap skb */ /* start back at beginning and unmap skb */
prod = txr->tx_prod; prod = txr->tx_prod;
tx_buf = &txr->tx_buf_ring[prod]; tx_buf = &txr->tx_buf_ring[prod];
tx_buf->skb = NULL;
dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb), PCI_DMA_TODEVICE); skb_headlen(skb), PCI_DMA_TODEVICE);
prod = NEXT_TX(prod); prod = NEXT_TX(prod);
@ -664,7 +684,13 @@ tx_dma_error:
PCI_DMA_TODEVICE); PCI_DMA_TODEVICE);
} }
tx_free:
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
tx_kick_pending:
if (txr->kick_pending)
bnxt_txr_db_kick(bp, txr, txr->tx_prod);
txr->tx_buf_ring[txr->tx_prod].skb = NULL;
atomic_long_inc(&dev->tx_dropped);
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
@ -734,14 +760,9 @@ next_tx_int:
smp_mb(); smp_mb();
if (unlikely(netif_tx_queue_stopped(txq)) && if (unlikely(netif_tx_queue_stopped(txq)) &&
(bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) { bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
__netif_tx_lock(txq, smp_processor_id()); READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
if (netif_tx_queue_stopped(txq) && netif_tx_wake_queue(txq);
bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
txr->dev_state != BNXT_DEV_STATE_CLOSING)
netif_tx_wake_queue(txq);
__netif_tx_unlock(txq);
}
} }
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@ -1769,6 +1790,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
return -EBUSY; return -EBUSY;
/* The valid test of the entry must be done first before
* reading any further.
*/
dma_rmb();
prod = rxr->rx_prod; prod = rxr->rx_prod;
if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) { if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
@ -1991,6 +2016,10 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
return -EBUSY; return -EBUSY;
/* The valid test of the entry must be done first before
* reading any further.
*/
dma_rmb();
cmp_type = RX_CMP_TYPE(rxcmp); cmp_type = RX_CMP_TYPE(rxcmp);
if (cmp_type == CMP_TYPE_RX_L2_CMP) { if (cmp_type == CMP_TYPE_RX_L2_CMP) {
rxcmp1->rx_cmp_cfa_code_errors_v2 |= rxcmp1->rx_cmp_cfa_code_errors_v2 |=
@ -2477,6 +2506,10 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
if (!TX_CMP_VALID(txcmp, raw_cons)) if (!TX_CMP_VALID(txcmp, raw_cons))
break; break;
/* The valid test of the entry must be done first before
* reading any further.
*/
dma_rmb();
if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
tmp_raw_cons = NEXT_RAW_CMP(raw_cons); tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
cp_cons = RING_CMP(tmp_raw_cons); cp_cons = RING_CMP(tmp_raw_cons);
@ -9230,10 +9263,9 @@ static void bnxt_disable_napi(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) { for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
napi_disable(&bp->bnapi[i]->napi);
if (bp->bnapi[i]->rx_ring) if (bp->bnapi[i]->rx_ring)
cancel_work_sync(&cpr->dim.work); cancel_work_sync(&cpr->dim.work);
napi_disable(&bp->bnapi[i]->napi);
} }
} }
@ -9267,9 +9299,11 @@ void bnxt_tx_disable(struct bnxt *bp)
if (bp->tx_ring) { if (bp->tx_ring) {
for (i = 0; i < bp->tx_nr_rings; i++) { for (i = 0; i < bp->tx_nr_rings; i++) {
txr = &bp->tx_ring[i]; txr = &bp->tx_ring[i];
txr->dev_state = BNXT_DEV_STATE_CLOSING; WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
} }
} }
/* Make sure napi polls see @dev_state change */
synchronize_net();
/* Drop carrier first to prevent TX timeout */ /* Drop carrier first to prevent TX timeout */
netif_carrier_off(bp->dev); netif_carrier_off(bp->dev);
/* Stop all TX queues */ /* Stop all TX queues */
@ -9283,8 +9317,10 @@ void bnxt_tx_enable(struct bnxt *bp)
for (i = 0; i < bp->tx_nr_rings; i++) { for (i = 0; i < bp->tx_nr_rings; i++) {
txr = &bp->tx_ring[i]; txr = &bp->tx_ring[i];
txr->dev_state = 0; WRITE_ONCE(txr->dev_state, 0);
} }
/* Make sure napi polls see @dev_state change */
synchronize_net();
netif_tx_wake_all_queues(bp->dev); netif_tx_wake_all_queues(bp->dev);
if (bp->link_info.link_up) if (bp->link_info.link_up)
netif_carrier_on(bp->dev); netif_carrier_on(bp->dev);
@ -10863,6 +10899,9 @@ static bool bnxt_rfs_supported(struct bnxt *bp)
return true; return true;
return false; return false;
} }
/* 212 firmware is broken for aRFS */
if (BNXT_FW_MAJ(bp) == 212)
return false;
if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
return true; return true;
if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)


@ -799,6 +799,7 @@ struct bnxt_tx_ring_info {
u16 tx_prod; u16 tx_prod;
u16 tx_cons; u16 tx_cons;
u16 txq_index; u16 txq_index;
u8 kick_pending;
struct bnxt_db_info tx_db; struct bnxt_db_info tx_db;
struct tx_bd *tx_desc_ring[MAX_TX_PAGES]; struct tx_bd *tx_desc_ring[MAX_TX_PAGES];


@ -3160,17 +3160,6 @@ static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
return err; return err;
} }
static void dpaa2_switch_takedown(struct fsl_mc_device *sw_dev)
{
struct device *dev = &sw_dev->dev;
struct ethsw_core *ethsw = dev_get_drvdata(dev);
int err;
err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
if (err)
dev_warn(dev, "dpsw_close err %d\n", err);
}
static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw) static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
{ {
dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
@ -3180,6 +3169,21 @@ static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
dpaa2_switch_free_dpbp(ethsw); dpaa2_switch_free_dpbp(ethsw);
} }
static void dpaa2_switch_teardown(struct fsl_mc_device *sw_dev)
{
struct device *dev = &sw_dev->dev;
struct ethsw_core *ethsw = dev_get_drvdata(dev);
int err;
dpaa2_switch_ctrl_if_teardown(ethsw);
destroy_workqueue(ethsw->workqueue);
err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
if (err)
dev_warn(dev, "dpsw_close err %d\n", err);
}
static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev) static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
{ {
struct ethsw_port_priv *port_priv; struct ethsw_port_priv *port_priv;
@ -3190,8 +3194,6 @@ static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
dev = &sw_dev->dev; dev = &sw_dev->dev;
ethsw = dev_get_drvdata(dev); ethsw = dev_get_drvdata(dev);
dpaa2_switch_ctrl_if_teardown(ethsw);
dpaa2_switch_teardown_irqs(sw_dev); dpaa2_switch_teardown_irqs(sw_dev);
dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
@ -3207,9 +3209,7 @@ static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
kfree(ethsw->filter_blocks); kfree(ethsw->filter_blocks);
kfree(ethsw->ports); kfree(ethsw->ports);
dpaa2_switch_takedown(sw_dev); dpaa2_switch_teardown(sw_dev);
destroy_workqueue(ethsw->workqueue);
fsl_mc_portal_free(ethsw->mc_io); fsl_mc_portal_free(ethsw->mc_io);
@ -3326,7 +3326,7 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
GFP_KERNEL); GFP_KERNEL);
if (!(ethsw->ports)) { if (!(ethsw->ports)) {
err = -ENOMEM; err = -ENOMEM;
goto err_takedown; goto err_teardown;
} }
ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs), ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs),
@ -3397,8 +3397,8 @@ err_free_fdbs:
err_free_ports: err_free_ports:
kfree(ethsw->ports); kfree(ethsw->ports);
err_takedown: err_teardown:
dpaa2_switch_takedown(sw_dev); dpaa2_switch_teardown(sw_dev);
err_free_cmdport: err_free_cmdport:
fsl_mc_portal_free(ethsw->mc_io); fsl_mc_portal_free(ethsw->mc_io);


@ -3663,8 +3663,7 @@ u16 i40e_lan_select_queue(struct net_device *netdev,
/* is DCB enabled at all? */ /* is DCB enabled at all? */
if (vsi->tc_config.numtc == 1) if (vsi->tc_config.numtc == 1)
return i40e_swdcb_skb_tx_hash(netdev, skb, return netdev_pick_tx(netdev, skb, sb_dev);
netdev->real_num_tx_queues);
prio = skb->priority; prio = skb->priority;
hw = &vsi->back->hw; hw = &vsi->back->hw;


@ -136,6 +136,7 @@ struct iavf_q_vector {
struct iavf_mac_filter { struct iavf_mac_filter {
struct list_head list; struct list_head list;
u8 macaddr[ETH_ALEN]; u8 macaddr[ETH_ALEN];
bool is_new_mac; /* filter is new, wait for PF decision */
bool remove; /* filter needs to be removed */ bool remove; /* filter needs to be removed */
bool add; /* filter needs to be added */ bool add; /* filter needs to be added */
}; };


@ -772,6 +772,7 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
list_add_tail(&f->list, &adapter->mac_filter_list); list_add_tail(&f->list, &adapter->mac_filter_list);
f->add = true; f->add = true;
f->is_new_mac = true;
adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
} else { } else {
f->remove = false; f->remove = false;


@ -540,6 +540,47 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter)
kfree(veal); kfree(veal);
} }
/**
* iavf_mac_add_ok
* @adapter: adapter structure
*
* Submit list of filters based on PF response.
**/
static void iavf_mac_add_ok(struct iavf_adapter *adapter)
{
struct iavf_mac_filter *f, *ftmp;
spin_lock_bh(&adapter->mac_vlan_list_lock);
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
f->is_new_mac = false;
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
/**
* iavf_mac_add_reject
* @adapter: adapter structure
*
* Remove filters from list based on PF response.
**/
static void iavf_mac_add_reject(struct iavf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct iavf_mac_filter *f, *ftmp;
spin_lock_bh(&adapter->mac_vlan_list_lock);
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr))
f->remove = false;
if (f->is_new_mac) {
list_del(&f->list);
kfree(f);
}
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
/** /**
* iavf_add_vlans * iavf_add_vlans
* @adapter: adapter structure * @adapter: adapter structure
@ -1492,6 +1533,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
case VIRTCHNL_OP_ADD_ETH_ADDR: case VIRTCHNL_OP_ADD_ETH_ADDR:
dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n", dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
iavf_stat_str(&adapter->hw, v_retval)); iavf_stat_str(&adapter->hw, v_retval));
iavf_mac_add_reject(adapter);
/* restore administratively set MAC address */ /* restore administratively set MAC address */
ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
break; break;
@ -1639,10 +1681,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
} }
} }
switch (v_opcode) { switch (v_opcode) {
case VIRTCHNL_OP_ADD_ETH_ADDR: { case VIRTCHNL_OP_ADD_ETH_ADDR:
if (!v_retval)
iavf_mac_add_ok(adapter);
if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr)) if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr))
ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
}
break; break;
case VIRTCHNL_OP_GET_STATS: { case VIRTCHNL_OP_GET_STATS: {
struct iavf_eth_stats *stats = struct iavf_eth_stats *stats =


@ -656,7 +656,7 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
* maintaining phase * maintaining phase
*/ */
if (start_time < current_time) if (start_time < current_time)
start_time = div64_u64(current_time + NSEC_PER_MSEC - 1, start_time = div64_u64(current_time + NSEC_PER_SEC - 1,
NSEC_PER_SEC) * NSEC_PER_SEC + phase; NSEC_PER_SEC) * NSEC_PER_SEC + phase;
start_time -= E810_OUT_PROP_DELAY_NS; start_time -= E810_OUT_PROP_DELAY_NS;


@ -52,8 +52,11 @@ static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
/* Kick start the NAPI context so that receiving will start */ /* Kick start the NAPI context so that receiving will start */
err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX); err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
if (err) if (err) {
clear_bit(qid, adapter->af_xdp_zc_qps);
xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);
return err; return err;
}
} }
return 0; return 0;


@ -1358,6 +1358,7 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
struct net_device *bond = ocelot_port->bond; struct net_device *bond = ocelot_port->bond;
mask = ocelot_get_bridge_fwd_mask(ocelot, bridge); mask = ocelot_get_bridge_fwd_mask(ocelot, bridge);
mask |= cpu_fwd_mask;
mask &= ~BIT(port); mask &= ~BIT(port);
if (bond) { if (bond) {
mask &= ~ocelot_get_bond_mask(ocelot, bond, mask &= ~ocelot_get_bond_mask(ocelot, bond,


@ -327,6 +327,9 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
unsigned long flags; unsigned long flags;
int rc = -EINVAL; int rc = -EINVAL;
if (!p_ll2_conn)
return rc;
spin_lock_irqsave(&p_tx->lock, flags); spin_lock_irqsave(&p_tx->lock, flags);
if (p_tx->b_completing_packet) { if (p_tx->b_completing_packet) {
rc = -EBUSY; rc = -EBUSY;
@ -500,7 +503,16 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
unsigned long flags = 0; unsigned long flags = 0;
int rc = 0; int rc = 0;
if (!p_ll2_conn)
return rc;
spin_lock_irqsave(&p_rx->lock, flags); spin_lock_irqsave(&p_rx->lock, flags);
if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) {
spin_unlock_irqrestore(&p_rx->lock, flags);
return 0;
}
cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons); cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
@ -821,6 +833,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie; struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
int rc; int rc;
if (!p_ll2_conn)
return 0;
if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
return 0; return 0;
@ -844,6 +859,9 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
u16 new_idx = 0, num_bds = 0; u16 new_idx = 0, num_bds = 0;
int rc; int rc;
if (!p_ll2_conn)
return 0;
if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
return 0; return 0;
@ -1728,6 +1746,8 @@ int qed_ll2_post_rx_buffer(void *cxt,
if (!p_ll2_conn) if (!p_ll2_conn)
return -EINVAL; return -EINVAL;
p_rx = &p_ll2_conn->rx_queue; p_rx = &p_ll2_conn->rx_queue;
if (!p_rx->set_prod_addr)
return -EIO;
spin_lock_irqsave(&p_rx->lock, flags); spin_lock_irqsave(&p_rx->lock, flags);
if (!list_empty(&p_rx->free_descq)) if (!list_empty(&p_rx->free_descq))


@ -1285,8 +1285,7 @@ qed_rdma_create_qp(void *rdma_cxt,
if (!rdma_cxt || !in_params || !out_params || if (!rdma_cxt || !in_params || !out_params ||
!p_hwfn->p_rdma_info->active) { !p_hwfn->p_rdma_info->active) {
DP_ERR(p_hwfn->cdev, pr_err("qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
"qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
rdma_cxt, in_params, out_params); rdma_cxt, in_params, out_params);
return NULL; return NULL;
} }


@ -3156,8 +3156,10 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr); indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr);
ret = QLCRD32(adapter, indirect_addr, &err); ret = QLCRD32(adapter, indirect_addr, &err);
if (err == -EIO) if (err == -EIO) {
qlcnic_83xx_unlock_flash(adapter);
return err; return err;
}
word = ret; word = ret;
*(u32 *)p_data = word; *(u32 *)p_data = word;


@ -827,6 +827,12 @@ static void decode_data(struct sixpack *sp, unsigned char inbyte)
return; return;
} }
if (sp->rx_count_cooked + 2 >= sizeof(sp->cooked_buf)) {
pr_err("6pack: cooked buffer overrun, data loss\n");
sp->rx_count = 0;
return;
}
buf = sp->raw_buf; buf = sp->raw_buf;
sp->cooked_buf[sp->rx_count_cooked++] = sp->cooked_buf[sp->rx_count_cooked++] =
buf[0] | ((buf[1] << 2) & 0xc0); buf[0] | ((buf[1] << 2) & 0xc0);


@ -82,6 +82,17 @@ out:
static int parent_count; static int parent_count;
static void mdio_mux_uninit_children(struct mdio_mux_parent_bus *pb)
{
struct mdio_mux_child_bus *cb = pb->children;
while (cb) {
mdiobus_unregister(cb->mii_bus);
mdiobus_free(cb->mii_bus);
cb = cb->next;
}
}
int mdio_mux_init(struct device *dev, int mdio_mux_init(struct device *dev,
struct device_node *mux_node, struct device_node *mux_node,
int (*switch_fn)(int cur, int desired, void *data), int (*switch_fn)(int cur, int desired, void *data),
@ -144,7 +155,7 @@ int mdio_mux_init(struct device *dev,
cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL); cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL);
if (!cb) { if (!cb) {
ret_val = -ENOMEM; ret_val = -ENOMEM;
continue; goto err_loop;
} }
cb->bus_number = v; cb->bus_number = v;
cb->parent = pb; cb->parent = pb;
@ -152,8 +163,7 @@ int mdio_mux_init(struct device *dev,
cb->mii_bus = mdiobus_alloc(); cb->mii_bus = mdiobus_alloc();
if (!cb->mii_bus) { if (!cb->mii_bus) {
ret_val = -ENOMEM; ret_val = -ENOMEM;
devm_kfree(dev, cb); goto err_loop;
continue;
} }
cb->mii_bus->priv = cb; cb->mii_bus->priv = cb;
@ -165,11 +175,15 @@ int mdio_mux_init(struct device *dev,
cb->mii_bus->write = mdio_mux_write; cb->mii_bus->write = mdio_mux_write;
r = of_mdiobus_register(cb->mii_bus, child_bus_node); r = of_mdiobus_register(cb->mii_bus, child_bus_node);
if (r) { if (r) {
mdiobus_free(cb->mii_bus);
if (r == -EPROBE_DEFER) {
ret_val = r;
goto err_loop;
}
devm_kfree(dev, cb);
dev_err(dev, dev_err(dev,
"Error: Failed to register MDIO bus for child %pOF\n", "Error: Failed to register MDIO bus for child %pOF\n",
child_bus_node); child_bus_node);
mdiobus_free(cb->mii_bus);
devm_kfree(dev, cb);
} else { } else {
cb->next = pb->children; cb->next = pb->children;
pb->children = cb; pb->children = cb;
@ -181,7 +195,10 @@ int mdio_mux_init(struct device *dev,
} }
dev_err(dev, "Error: No acceptable child buses found\n"); dev_err(dev, "Error: No acceptable child buses found\n");
devm_kfree(dev, pb);
err_loop:
mdio_mux_uninit_children(pb);
of_node_put(child_bus_node);
err_pb_kz: err_pb_kz:
put_device(&parent_bus->dev); put_device(&parent_bus->dev);
err_parent_bus: err_parent_bus:
@ -193,14 +210,8 @@ EXPORT_SYMBOL_GPL(mdio_mux_init);
void mdio_mux_uninit(void *mux_handle) void mdio_mux_uninit(void *mux_handle)
{ {
struct mdio_mux_parent_bus *pb = mux_handle; struct mdio_mux_parent_bus *pb = mux_handle;
struct mdio_mux_child_bus *cb = pb->children;
while (cb) {
mdiobus_unregister(cb->mii_bus);
mdiobus_free(cb->mii_bus);
cb = cb->next;
}
mdio_mux_uninit_children(pb);
put_device(&pb->mii_bus->dev); put_device(&pb->mii_bus->dev);
} }
EXPORT_SYMBOL_GPL(mdio_mux_uninit); EXPORT_SYMBOL_GPL(mdio_mux_uninit);


@ -63,6 +63,29 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
value, index, data, size); value, index, data, size);
} }
static int asix_check_host_enable(struct usbnet *dev, int in_pm)
{
int i, ret;
u8 smsr;
for (i = 0; i < 30; ++i) {
ret = asix_set_sw_mii(dev, in_pm);
if (ret == -ENODEV || ret == -ETIMEDOUT)
break;
usleep_range(1000, 1100);
ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
0, 0, 1, &smsr, in_pm);
if (ret == -ENODEV)
break;
else if (ret < 0)
continue;
else if (smsr & AX_HOST_EN)
break;
}
return ret;
}
static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx) static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx)
{ {
/* Reset the variables that have a lifetime outside of /* Reset the variables that have a lifetime outside of
@ -467,19 +490,11 @@ int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
{ {
struct usbnet *dev = netdev_priv(netdev); struct usbnet *dev = netdev_priv(netdev);
__le16 res; __le16 res;
u8 smsr;
int i = 0;
int ret; int ret;
mutex_lock(&dev->phy_mutex); mutex_lock(&dev->phy_mutex);
do {
ret = asix_set_sw_mii(dev, 0); ret = asix_check_host_enable(dev, 0);
if (ret == -ENODEV || ret == -ETIMEDOUT)
break;
usleep_range(1000, 1100);
ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
0, 0, 1, &smsr, 0);
} while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
if (ret == -ENODEV || ret == -ETIMEDOUT) { if (ret == -ENODEV || ret == -ETIMEDOUT) {
mutex_unlock(&dev->phy_mutex); mutex_unlock(&dev->phy_mutex);
return ret; return ret;
@ -505,23 +520,14 @@ static int __asix_mdio_write(struct net_device *netdev, int phy_id, int loc,
{ {
struct usbnet *dev = netdev_priv(netdev); struct usbnet *dev = netdev_priv(netdev);
__le16 res = cpu_to_le16(val); __le16 res = cpu_to_le16(val);
u8 smsr;
int i = 0;
int ret; int ret;
netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n", netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
phy_id, loc, val); phy_id, loc, val);
mutex_lock(&dev->phy_mutex); mutex_lock(&dev->phy_mutex);
do {
ret = asix_set_sw_mii(dev, 0);
if (ret == -ENODEV)
break;
usleep_range(1000, 1100);
ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
0, 0, 1, &smsr, 0);
} while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
ret = asix_check_host_enable(dev, 0);
if (ret == -ENODEV) if (ret == -ENODEV)
goto out; goto out;
@ -561,19 +567,11 @@ int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc)
{ {
struct usbnet *dev = netdev_priv(netdev); struct usbnet *dev = netdev_priv(netdev);
__le16 res; __le16 res;
u8 smsr;
int i = 0;
int ret; int ret;
mutex_lock(&dev->phy_mutex); mutex_lock(&dev->phy_mutex);
do {
ret = asix_set_sw_mii(dev, 1); ret = asix_check_host_enable(dev, 1);
if (ret == -ENODEV || ret == -ETIMEDOUT)
break;
usleep_range(1000, 1100);
ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
0, 0, 1, &smsr, 1);
} while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
if (ret == -ENODEV || ret == -ETIMEDOUT) { if (ret == -ENODEV || ret == -ETIMEDOUT) {
mutex_unlock(&dev->phy_mutex); mutex_unlock(&dev->phy_mutex);
return ret; return ret;
@ -595,22 +593,14 @@ asix_mdio_write_nopm(struct net_device *netdev, int phy_id, int loc, int val)
{ {
struct usbnet *dev = netdev_priv(netdev); struct usbnet *dev = netdev_priv(netdev);
__le16 res = cpu_to_le16(val); __le16 res = cpu_to_le16(val);
u8 smsr;
int i = 0;
int ret; int ret;
netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n", netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
phy_id, loc, val); phy_id, loc, val);
mutex_lock(&dev->phy_mutex); mutex_lock(&dev->phy_mutex);
do {
ret = asix_set_sw_mii(dev, 1); ret = asix_check_host_enable(dev, 1);
if (ret == -ENODEV)
break;
usleep_range(1000, 1100);
ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
0, 0, 1, &smsr, 1);
} while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
if (ret == -ENODEV) { if (ret == -ENODEV) {
mutex_unlock(&dev->phy_mutex); mutex_unlock(&dev->phy_mutex);
return; return;


@ -3955,17 +3955,28 @@ static void rtl_clear_bp(struct r8152 *tp, u16 type)
case RTL_VER_06: case RTL_VER_06:
ocp_write_byte(tp, type, PLA_BP_EN, 0); ocp_write_byte(tp, type, PLA_BP_EN, 0);
break; break;
case RTL_VER_14:
ocp_write_word(tp, type, USB_BP2_EN, 0);
ocp_write_word(tp, type, USB_BP_8, 0);
ocp_write_word(tp, type, USB_BP_9, 0);
ocp_write_word(tp, type, USB_BP_10, 0);
ocp_write_word(tp, type, USB_BP_11, 0);
ocp_write_word(tp, type, USB_BP_12, 0);
ocp_write_word(tp, type, USB_BP_13, 0);
ocp_write_word(tp, type, USB_BP_14, 0);
ocp_write_word(tp, type, USB_BP_15, 0);
break;
case RTL_VER_08: case RTL_VER_08:
case RTL_VER_09: case RTL_VER_09:
case RTL_VER_10: case RTL_VER_10:
case RTL_VER_11: case RTL_VER_11:
case RTL_VER_12: case RTL_VER_12:
case RTL_VER_13: case RTL_VER_13:
case RTL_VER_14:
case RTL_VER_15: case RTL_VER_15:
default: default:
if (type == MCU_TYPE_USB) { if (type == MCU_TYPE_USB) {
ocp_write_byte(tp, MCU_TYPE_USB, USB_BP2_EN, 0); ocp_write_word(tp, MCU_TYPE_USB, USB_BP2_EN, 0);
ocp_write_word(tp, MCU_TYPE_USB, USB_BP_8, 0); ocp_write_word(tp, MCU_TYPE_USB, USB_BP_8, 0);
ocp_write_word(tp, MCU_TYPE_USB, USB_BP_9, 0); ocp_write_word(tp, MCU_TYPE_USB, USB_BP_9, 0);
@ -4331,7 +4342,6 @@ static bool rtl8152_is_fw_mac_ok(struct r8152 *tp, struct fw_mac *mac)
case RTL_VER_11: case RTL_VER_11:
case RTL_VER_12: case RTL_VER_12:
case RTL_VER_13: case RTL_VER_13:
case RTL_VER_14:
case RTL_VER_15: case RTL_VER_15:
fw_reg = 0xf800; fw_reg = 0xf800;
bp_ba_addr = PLA_BP_BA; bp_ba_addr = PLA_BP_BA;
@ -4339,6 +4349,13 @@ static bool rtl8152_is_fw_mac_ok(struct r8152 *tp, struct fw_mac *mac)
bp_start = PLA_BP_0; bp_start = PLA_BP_0;
max_bp = 8; max_bp = 8;
break; break;
case RTL_VER_14:
fw_reg = 0xf800;
bp_ba_addr = PLA_BP_BA;
bp_en_addr = USB_BP2_EN;
bp_start = PLA_BP_0;
max_bp = 16;
break;
default: default:
goto out; goto out;
} }


@ -63,7 +63,7 @@ static const unsigned long guest_offloads[] = {
VIRTIO_NET_F_GUEST_CSUM VIRTIO_NET_F_GUEST_CSUM
}; };
#define GUEST_OFFLOAD_LRO_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \ #define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \ (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
(1ULL << VIRTIO_NET_F_GUEST_ECN) | \ (1ULL << VIRTIO_NET_F_GUEST_ECN) | \
(1ULL << VIRTIO_NET_F_GUEST_UFO)) (1ULL << VIRTIO_NET_F_GUEST_UFO))
@ -2504,7 +2504,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) { virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) {
NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO/CSUM, disable LRO/CSUM first"); NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
@ -2635,15 +2635,15 @@ static int virtnet_set_features(struct net_device *dev,
u64 offloads; u64 offloads;
int err; int err;
if ((dev->features ^ features) & NETIF_F_LRO) { if ((dev->features ^ features) & NETIF_F_GRO_HW) {
if (vi->xdp_enabled) if (vi->xdp_enabled)
return -EBUSY; return -EBUSY;
if (features & NETIF_F_LRO) if (features & NETIF_F_GRO_HW)
offloads = vi->guest_offloads_capable; offloads = vi->guest_offloads_capable;
else else
offloads = vi->guest_offloads_capable & offloads = vi->guest_offloads_capable &
~GUEST_OFFLOAD_LRO_MASK; ~GUEST_OFFLOAD_GRO_HW_MASK;
err = virtnet_set_guest_offloads(vi, offloads); err = virtnet_set_guest_offloads(vi, offloads);
if (err) if (err)
@ -3123,9 +3123,9 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->features |= NETIF_F_RXCSUM; dev->features |= NETIF_F_RXCSUM;
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)) virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
dev->features |= NETIF_F_LRO; dev->features |= NETIF_F_GRO_HW;
if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
dev->hw_features |= NETIF_F_LRO; dev->hw_features |= NETIF_F_GRO_HW;
dev->vlan_features = dev->features; dev->vlan_features = dev->features;


@ -1360,6 +1360,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr); bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
bool is_ndisc = ipv6_ndisc_frame(skb); bool is_ndisc = ipv6_ndisc_frame(skb);
nf_reset_ct(skb);
/* loopback, multicast & non-ND link-local traffic; do not push through /* loopback, multicast & non-ND link-local traffic; do not push through
* packet taps again. Reset pkt_type for upper layers to process skb. * packet taps again. Reset pkt_type for upper layers to process skb.
* For strict packets with a source LLA, determine the dst using the * For strict packets with a source LLA, determine the dst using the
@ -1422,6 +1424,8 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
skb->skb_iif = vrf_dev->ifindex; skb->skb_iif = vrf_dev->ifindex;
IPCB(skb)->flags |= IPSKB_L3SLAVE; IPCB(skb)->flags |= IPSKB_L3SLAVE;
nf_reset_ct(skb);
if (ipv4_is_multicast(ip_hdr(skb)->daddr)) if (ipv4_is_multicast(ip_hdr(skb)->daddr))
goto out; goto out;


@ -37,6 +37,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
u32 sha1 = 0; u32 sha1 = 0;
u16 mac_type = 0, rf_id = 0; u16 mac_type = 0, rf_id = 0;
u8 *pnvm_data = NULL, *tmp; u8 *pnvm_data = NULL, *tmp;
bool hw_match = false;
u32 size = 0; u32 size = 0;
int ret; int ret;
@ -83,6 +84,9 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
break; break;
} }
if (hw_match)
break;
mac_type = le16_to_cpup((__le16 *)data); mac_type = le16_to_cpup((__le16 *)data);
rf_id = le16_to_cpup((__le16 *)(data + sizeof(__le16))); rf_id = le16_to_cpup((__le16 *)(data + sizeof(__le16)));
@ -90,15 +94,9 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
"Got IWL_UCODE_TLV_HW_TYPE mac_type 0x%0x rf_id 0x%0x\n", "Got IWL_UCODE_TLV_HW_TYPE mac_type 0x%0x rf_id 0x%0x\n",
mac_type, rf_id); mac_type, rf_id);
if (mac_type != CSR_HW_REV_TYPE(trans->hw_rev) || if (mac_type == CSR_HW_REV_TYPE(trans->hw_rev) &&
rf_id != CSR_HW_RFID_TYPE(trans->hw_rf_id)) { rf_id == CSR_HW_RFID_TYPE(trans->hw_rf_id))
IWL_DEBUG_FW(trans, hw_match = true;
"HW mismatch, skipping PNVM section, mac_type 0x%0x, rf_id 0x%0x.\n",
CSR_HW_REV_TYPE(trans->hw_rev), trans->hw_rf_id);
ret = -ENOENT;
goto out;
}
break; break;
case IWL_UCODE_TLV_SEC_RT: { case IWL_UCODE_TLV_SEC_RT: {
struct iwl_pnvm_section *section = (void *)data; struct iwl_pnvm_section *section = (void *)data;
@ -149,6 +147,15 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
} }
done: done:
if (!hw_match) {
IWL_DEBUG_FW(trans,
"HW mismatch, skipping PNVM section (need mac_type 0x%x rf_id 0x%x)\n",
CSR_HW_REV_TYPE(trans->hw_rev),
CSR_HW_RFID_TYPE(trans->hw_rf_id));
ret = -ENOENT;
goto out;
}
if (!size) { if (!size) {
IWL_DEBUG_FW(trans, "Empty PNVM, skipping.\n"); IWL_DEBUG_FW(trans, "Empty PNVM, skipping.\n");
ret = -ENOENT; ret = -ENOENT;
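The shape of this change is easier to see in isolation: rather than returning -ENOENT from inside the TLV loop on the first IWL_UCODE_TLV_HW_TYPE entry that does not match, the parser now records whether any entry matched and decides once the walk is done. A simplified, self-contained C sketch of that pattern follows; the struct tlv layout, error values and helper names are invented for illustration and are not the iwlwifi structures.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    enum tlv_type { TLV_HW_TYPE, TLV_SEC_DATA };

    struct tlv {
            enum tlv_type type;
            unsigned int mac_type, rf_id;  /* meaningful for TLV_HW_TYPE */
            size_t len;                    /* meaningful for TLV_SEC_DATA */
    };

    static int parse_image(const struct tlv *tlvs, int n,
                           unsigned int mac, unsigned int rf)
    {
            bool hw_match = false;
            size_t size = 0;

            for (int i = 0; i < n; i++) {
                    switch (tlvs[i].type) {
                    case TLV_HW_TYPE:
                            /* Remember a match, but keep walking the image. */
                            if (tlvs[i].mac_type == mac && tlvs[i].rf_id == rf)
                                    hw_match = true;
                            break;
                    case TLV_SEC_DATA:
                            size += tlvs[i].len;
                            break;
                    }
            }

            if (!hw_match)
                    return -1;     /* the driver returns -ENOENT here */
            if (!size)
                    return -1;     /* matched, but nothing to load */
            return 0;
    }

    int main(void)
    {
            const struct tlv image[] = {
                    { .type = TLV_HW_TYPE, .mac_type = 1, .rf_id = 9 }, /* other HW */
                    { .type = TLV_HW_TYPE, .mac_type = 2, .rf_id = 7 }, /* this HW  */
                    { .type = TLV_SEC_DATA, .len = 128 },
            };

            printf("%d\n", parse_image(image, 3, 2, 7)); /* 0: a later entry matched */
            return 0;
    }

With the old early return, the first entry in this example would have aborted the whole load even though a matching entry follows it.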


@ -1110,12 +1110,80 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_bz_a0_mr_a0, iwl_ax211_name), iwl_cfg_bz_a0_mr_a0, iwl_ax211_name),
/* SoF with JF2 */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
/* SoF with JF */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
/* So with GF */ /* So with GF */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name) iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name),
/* So with JF2 */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
/* So with JF */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_name)
#endif /* CONFIG_IWLMVM */ #endif /* CONFIG_IWLMVM */
}; };


@ -111,7 +111,7 @@ mt7915_mcu_get_cipher(int cipher)
case WLAN_CIPHER_SUITE_SMS4: case WLAN_CIPHER_SUITE_SMS4:
return MCU_CIPHER_WAPI; return MCU_CIPHER_WAPI;
default: default:
return MT_CIPHER_NONE; return MCU_CIPHER_NONE;
} }
} }


@ -1073,7 +1073,8 @@ enum {
}; };
enum mcu_cipher_type { enum mcu_cipher_type {
MCU_CIPHER_WEP40 = 1, MCU_CIPHER_NONE = 0,
MCU_CIPHER_WEP40,
MCU_CIPHER_WEP104, MCU_CIPHER_WEP104,
MCU_CIPHER_WEP128, MCU_CIPHER_WEP128,
MCU_CIPHER_TKIP, MCU_CIPHER_TKIP,


@ -111,7 +111,7 @@ mt7921_mcu_get_cipher(int cipher)
case WLAN_CIPHER_SUITE_SMS4: case WLAN_CIPHER_SUITE_SMS4:
return MCU_CIPHER_WAPI; return MCU_CIPHER_WAPI;
default: default:
return MT_CIPHER_NONE; return MCU_CIPHER_NONE;
} }
} }


@ -199,7 +199,8 @@ struct sta_rec_sec {
} __packed; } __packed;
enum mcu_cipher_type { enum mcu_cipher_type {
MCU_CIPHER_WEP40 = 1, MCU_CIPHER_NONE = 0,
MCU_CIPHER_WEP40,
MCU_CIPHER_WEP104, MCU_CIPHER_WEP104,
MCU_CIPHER_WEP128, MCU_CIPHER_WEP128,
MCU_CIPHER_TKIP, MCU_CIPHER_TKIP,
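These header hunks pair with the mcu.c hunks above: the helper's default case used to return MT_CIPHER_NONE, a constant from an unrelated enum, and adding MCU_CIPHER_NONE = 0 lets it stay within one type while preserving the existing values (WEP40 still evaluates to 1). A small stand-alone C sketch, with invented enum names and values, of why mixing enums like that is fragile:

    #include <stdio.h>

    enum hw_cipher  { HW_CIPHER_NONE = 0, HW_CIPHER_WEP40 = 1 };
    enum mcu_cipher { MCU_CIPHER_NONE = 0, MCU_CIPHER_WEP40, MCU_CIPHER_TKIP };

    static enum mcu_cipher to_mcu_cipher(int suite)
    {
            switch (suite) {
            case 40:
                    return MCU_CIPHER_WEP40;
            default:
                    /* Pre-fix pattern: a constant from the other enum. The compiler
                     * accepts it, but the numeric value follows that enum's own
                     * numbering, and checkers such as sparse flag the mismatch. */
                    return (enum mcu_cipher)HW_CIPHER_NONE;
            }
    }

    int main(void)
    {
            printf("%d %d\n", to_mcu_cipher(40), to_mcu_cipher(0)); /* 1 0 */
            return 0;
    }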


@ -64,10 +64,9 @@ static struct ipc_chnl_cfg modem_cfg[] = {
int ipc_chnl_cfg_get(struct ipc_chnl_cfg *chnl_cfg, int index) int ipc_chnl_cfg_get(struct ipc_chnl_cfg *chnl_cfg, int index)
{ {
int array_size = ARRAY_SIZE(modem_cfg); if (index >= ARRAY_SIZE(modem_cfg)) {
pr_err("index: %d and array size %zu", index,
if (index >= array_size) { ARRAY_SIZE(modem_cfg));
pr_err("index: %d and array_size %d", index, array_size);
return -ECHRNG; return -ECHRNG;
} }


@ -2527,7 +2527,7 @@ static void deactivate_labels(void *region)
static int init_active_labels(struct nd_region *nd_region) static int init_active_labels(struct nd_region *nd_region)
{ {
int i; int i, rc = 0;
for (i = 0; i < nd_region->ndr_mappings; i++) { for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i]; struct nd_mapping *nd_mapping = &nd_region->mapping[i];
@ -2546,13 +2546,14 @@ static int init_active_labels(struct nd_region *nd_region)
else if (test_bit(NDD_LABELING, &nvdimm->flags)) else if (test_bit(NDD_LABELING, &nvdimm->flags))
/* fail, labels needed to disambiguate dpa */; /* fail, labels needed to disambiguate dpa */;
else else
return 0; continue;
dev_err(&nd_region->dev, "%s: is %s, failing probe\n", dev_err(&nd_region->dev, "%s: is %s, failing probe\n",
dev_name(&nd_mapping->nvdimm->dev), dev_name(&nd_mapping->nvdimm->dev),
test_bit(NDD_LOCKED, &nvdimm->flags) test_bit(NDD_LOCKED, &nvdimm->flags)
? "locked" : "disabled"); ? "locked" : "disabled");
return -ENXIO; rc = -ENXIO;
goto out;
} }
nd_mapping->ndd = ndd; nd_mapping->ndd = ndd;
atomic_inc(&nvdimm->busy); atomic_inc(&nvdimm->busy);
@ -2586,13 +2587,17 @@ static int init_active_labels(struct nd_region *nd_region)
break; break;
} }
if (i < nd_region->ndr_mappings) { if (i < nd_region->ndr_mappings)
rc = -ENOMEM;
out:
if (rc) {
deactivate_labels(nd_region); deactivate_labels(nd_region);
return -ENOMEM; return rc;
} }
return devm_add_action_or_reset(&nd_region->dev, deactivate_labels, return devm_add_action_or_reset(&nd_region->dev, deactivate_labels,
nd_region); nd_region);
} }
int nd_region_register_namespaces(struct nd_region *nd_region, int *err) int nd_region_register_namespaces(struct nd_region *nd_region, int *err)


@ -143,24 +143,25 @@ static inline __attribute_const__ u32 msi_mask(unsigned x)
* reliably as devices without an INTx disable bit will then generate a * reliably as devices without an INTx disable bit will then generate a
* level IRQ which will never be cleared. * level IRQ which will never be cleared.
*/ */
u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{ {
u32 mask_bits = desc->masked; raw_spinlock_t *lock = &desc->dev->msi_lock;
unsigned long flags;
if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit) if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit)
return 0; return;
mask_bits &= ~mask; raw_spin_lock_irqsave(lock, flags);
mask_bits |= flag; desc->masked &= ~mask;
desc->masked |= flag;
pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos, pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos,
mask_bits); desc->masked);
raw_spin_unlock_irqrestore(lock, flags);
return mask_bits;
} }
static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{ {
desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag); __pci_msi_desc_mask_irq(desc, mask, flag);
} }
static void __iomem *pci_msix_desc_addr(struct msi_desc *desc) static void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
@ -289,13 +290,31 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
/* Don't touch the hardware now */ /* Don't touch the hardware now */
} else if (entry->msi_attrib.is_msix) { } else if (entry->msi_attrib.is_msix) {
void __iomem *base = pci_msix_desc_addr(entry); void __iomem *base = pci_msix_desc_addr(entry);
bool unmasked = !(entry->masked & PCI_MSIX_ENTRY_CTRL_MASKBIT);
if (!base) if (!base)
goto skip; goto skip;
/*
* The specification mandates that the entry is masked
* when the message is modified:
*
* "If software changes the Address or Data value of an
* entry while the entry is unmasked, the result is
* undefined."
*/
if (unmasked)
__pci_msix_desc_mask_irq(entry, PCI_MSIX_ENTRY_CTRL_MASKBIT);
writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR); writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR); writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
writel(msg->data, base + PCI_MSIX_ENTRY_DATA); writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
if (unmasked)
__pci_msix_desc_mask_irq(entry, 0);
/* Ensure that the writes are visible in the device */
readl(base + PCI_MSIX_ENTRY_DATA);
} else { } else {
int pos = dev->msi_cap; int pos = dev->msi_cap;
u16 msgctl; u16 msgctl;
@ -316,6 +335,8 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
pci_write_config_word(dev, pos + PCI_MSI_DATA_32, pci_write_config_word(dev, pos + PCI_MSI_DATA_32,
msg->data); msg->data);
} }
/* Ensure that the writes are visible in the device */
pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
} }
skip: skip:
@ -636,21 +657,21 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
/* Configure MSI capability structure */ /* Configure MSI capability structure */
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI); ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
if (ret) { if (ret) {
msi_mask_irq(entry, mask, ~mask); msi_mask_irq(entry, mask, 0);
free_msi_irqs(dev); free_msi_irqs(dev);
return ret; return ret;
} }
ret = msi_verify_entries(dev); ret = msi_verify_entries(dev);
if (ret) { if (ret) {
msi_mask_irq(entry, mask, ~mask); msi_mask_irq(entry, mask, 0);
free_msi_irqs(dev); free_msi_irqs(dev);
return ret; return ret;
} }
ret = populate_msi_sysfs(dev); ret = populate_msi_sysfs(dev);
if (ret) { if (ret) {
msi_mask_irq(entry, mask, ~mask); msi_mask_irq(entry, mask, 0);
free_msi_irqs(dev); free_msi_irqs(dev);
return ret; return ret;
} }
@ -691,6 +712,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
{ {
struct irq_affinity_desc *curmsk, *masks = NULL; struct irq_affinity_desc *curmsk, *masks = NULL;
struct msi_desc *entry; struct msi_desc *entry;
void __iomem *addr;
int ret, i; int ret, i;
int vec_count = pci_msix_vec_count(dev); int vec_count = pci_msix_vec_count(dev);
@ -711,6 +733,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
entry->msi_attrib.is_msix = 1; entry->msi_attrib.is_msix = 1;
entry->msi_attrib.is_64 = 1; entry->msi_attrib.is_64 = 1;
if (entries) if (entries)
entry->msi_attrib.entry_nr = entries[i].entry; entry->msi_attrib.entry_nr = entries[i].entry;
else else
@ -722,6 +745,10 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
entry->msi_attrib.default_irq = dev->irq; entry->msi_attrib.default_irq = dev->irq;
entry->mask_base = base; entry->mask_base = base;
addr = pci_msix_desc_addr(entry);
if (addr)
entry->masked = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
list_add_tail(&entry->list, dev_to_msi_list(&dev->dev)); list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
if (masks) if (masks)
curmsk++; curmsk++;
@ -732,28 +759,27 @@ out:
return ret; return ret;
} }
static void msix_program_entries(struct pci_dev *dev, static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries)
struct msix_entry *entries)
{ {
struct msi_desc *entry; struct msi_desc *entry;
int i = 0;
void __iomem *desc_addr;
for_each_pci_msi_entry(entry, dev) { for_each_pci_msi_entry(entry, dev) {
if (entries) if (entries) {
entries[i++].vector = entry->irq; entries->vector = entry->irq;
entries++;
desc_addr = pci_msix_desc_addr(entry); }
if (desc_addr)
entry->masked = readl(desc_addr +
PCI_MSIX_ENTRY_VECTOR_CTRL);
else
entry->masked = 0;
msix_mask_irq(entry, 1);
} }
} }
static void msix_mask_all(void __iomem *base, int tsize)
{
u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
int i;
for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
/** /**
* msix_capability_init - configure device's MSI-X capability * msix_capability_init - configure device's MSI-X capability
* @dev: pointer to the pci_dev data structure of MSI-X device function * @dev: pointer to the pci_dev data structure of MSI-X device function
@ -768,22 +794,33 @@ static void msix_program_entries(struct pci_dev *dev,
static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
int nvec, struct irq_affinity *affd) int nvec, struct irq_affinity *affd)
{ {
int ret;
u16 control;
void __iomem *base; void __iomem *base;
int ret, tsize;
u16 control;
/* Ensure MSI-X is disabled while it is set up */ /*
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); * Some devices require MSI-X to be enabled before the MSI-X
* registers can be accessed. Mask all the vectors to prevent
* interrupts coming in before they're fully set up.
*/
pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL |
PCI_MSIX_FLAGS_ENABLE);
pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
/* Request & Map MSI-X table region */ /* Request & Map MSI-X table region */
base = msix_map_region(dev, msix_table_size(control)); tsize = msix_table_size(control);
if (!base) base = msix_map_region(dev, tsize);
return -ENOMEM; if (!base) {
ret = -ENOMEM;
goto out_disable;
}
/* Ensure that all table entries are masked. */
msix_mask_all(base, tsize);
ret = msix_setup_entries(dev, base, entries, nvec, affd); ret = msix_setup_entries(dev, base, entries, nvec, affd);
if (ret) if (ret)
return ret; goto out_disable;
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
if (ret) if (ret)
@ -794,15 +831,7 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
if (ret) if (ret)
goto out_free; goto out_free;
/* msix_update_entries(dev, entries);
* Some devices require MSI-X to be enabled before we can touch the
* MSI-X registers. We need to mask all the vectors to prevent
* interrupts coming in before they're fully set up.
*/
pci_msix_clear_and_set_ctrl(dev, 0,
PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE);
msix_program_entries(dev, entries);
ret = populate_msi_sysfs(dev); ret = populate_msi_sysfs(dev);
if (ret) if (ret)
@ -836,6 +865,9 @@ out_avail:
out_free: out_free:
free_msi_irqs(dev); free_msi_irqs(dev);
out_disable:
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
return ret; return ret;
} }
@ -930,8 +962,7 @@ static void pci_msi_shutdown(struct pci_dev *dev)
/* Return the device with MSI unmasked as initial states */ /* Return the device with MSI unmasked as initial states */
mask = msi_mask(desc->msi_attrib.multi_cap); mask = msi_mask(desc->msi_attrib.multi_cap);
/* Keep cached state to be restored */ msi_mask_irq(desc, mask, 0);
__pci_msi_desc_mask_irq(desc, mask, ~mask);
/* Restore dev->irq to its default pin-assertion IRQ */ /* Restore dev->irq to its default pin-assertion IRQ */
dev->irq = desc->msi_attrib.default_irq; dev->irq = desc->msi_attrib.default_irq;
@ -1016,10 +1047,8 @@ static void pci_msix_shutdown(struct pci_dev *dev)
} }
/* Return the device with MSI-X masked as initial states */ /* Return the device with MSI-X masked as initial states */
for_each_pci_msi_entry(entry, dev) { for_each_pci_msi_entry(entry, dev)
/* Keep cached states to be restored */
__pci_msix_desc_mask_irq(entry, 1); __pci_msix_desc_mask_irq(entry, 1);
}
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
pci_intx_for_msi(dev, 1); pci_intx_for_msi(dev, 1);
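The heart of the MSI rework above is that the cached mask bits in desc->masked and the config-space write now happen as one unit under desc->dev->msi_lock, so concurrent mask/unmask calls cannot lose each other's updates. A minimal user-space analogue, with a pthread mutex standing in for the raw spinlock and a plain variable standing in for the config-space register (all names invented):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int cached_mask;   /* stand-in for desc->masked */
    static unsigned int hw_reg;        /* stand-in for the config-space register */

    static void mask_irq(unsigned int mask, unsigned int flag)
    {
            pthread_mutex_lock(&lock);
            cached_mask &= ~mask;      /* clear the bits being updated... */
            cached_mask |= flag;       /* ...then set the requested ones */
            hw_reg = cached_mask;      /* pci_write_config_dword() in the driver */
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            mask_irq(0xf, 0x3);        /* mask vectors 0-1, leave 2-3 unmasked */
            printf("0x%x\n", hw_reg);  /* 0x3 */
            return 0;
    }

A read-modify-write of a shared cached value followed by a register write is exactly the sequence that needs serialization; the hunk's other changes (masking an MSI-X entry around address/data updates, masking the whole table before setup) address the related ordering rules quoted from the specification in the comments above.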


@ -701,32 +701,32 @@ static const struct pinctrl_pin_desc tglh_pins[] = {
static const struct intel_padgroup tglh_community0_gpps[] = { static const struct intel_padgroup tglh_community0_gpps[] = {
TGL_GPP(0, 0, 24, 0), /* GPP_A */ TGL_GPP(0, 0, 24, 0), /* GPP_A */
TGL_GPP(1, 25, 44, 128), /* GPP_R */ TGL_GPP(1, 25, 44, 32), /* GPP_R */
TGL_GPP(2, 45, 70, 32), /* GPP_B */ TGL_GPP(2, 45, 70, 64), /* GPP_B */
TGL_GPP(3, 71, 78, INTEL_GPIO_BASE_NOMAP), /* vGPIO_0 */ TGL_GPP(3, 71, 78, 96), /* vGPIO_0 */
}; };
static const struct intel_padgroup tglh_community1_gpps[] = { static const struct intel_padgroup tglh_community1_gpps[] = {
TGL_GPP(0, 79, 104, 96), /* GPP_D */ TGL_GPP(0, 79, 104, 128), /* GPP_D */
TGL_GPP(1, 105, 128, 64), /* GPP_C */ TGL_GPP(1, 105, 128, 160), /* GPP_C */
TGL_GPP(2, 129, 136, 160), /* GPP_S */ TGL_GPP(2, 129, 136, 192), /* GPP_S */
TGL_GPP(3, 137, 153, 192), /* GPP_G */ TGL_GPP(3, 137, 153, 224), /* GPP_G */
TGL_GPP(4, 154, 180, 224), /* vGPIO */ TGL_GPP(4, 154, 180, 256), /* vGPIO */
}; };
static const struct intel_padgroup tglh_community3_gpps[] = { static const struct intel_padgroup tglh_community3_gpps[] = {
TGL_GPP(0, 181, 193, 256), /* GPP_E */ TGL_GPP(0, 181, 193, 288), /* GPP_E */
TGL_GPP(1, 194, 217, 288), /* GPP_F */ TGL_GPP(1, 194, 217, 320), /* GPP_F */
}; };
static const struct intel_padgroup tglh_community4_gpps[] = { static const struct intel_padgroup tglh_community4_gpps[] = {
TGL_GPP(0, 218, 241, 320), /* GPP_H */ TGL_GPP(0, 218, 241, 352), /* GPP_H */
TGL_GPP(1, 242, 251, 384), /* GPP_J */ TGL_GPP(1, 242, 251, 384), /* GPP_J */
TGL_GPP(2, 252, 266, 352), /* GPP_K */ TGL_GPP(2, 252, 266, 416), /* GPP_K */
}; };
static const struct intel_padgroup tglh_community5_gpps[] = { static const struct intel_padgroup tglh_community5_gpps[] = {
TGL_GPP(0, 267, 281, 416), /* GPP_I */ TGL_GPP(0, 267, 281, 448), /* GPP_I */
TGL_GPP(1, 282, 290, INTEL_GPIO_BASE_NOMAP), /* JTAG */ TGL_GPP(1, 282, 290, INTEL_GPIO_BASE_NOMAP), /* JTAG */
}; };
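A worked reading of the new numbers, inferred only from the values visible in this hunk: each pad group now occupies a fixed 32-entry window in the GPIO numbering regardless of its pin count, so GPP_A (pins 0-24) still advances the base by 32, putting GPP_R at 32, GPP_B at 64, vGPIO_0 at 96, and so on in steps of 32 up to GPP_I at 448, whereas the old assignment was not sequential in group order and left vGPIO_0 unmapped.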


@ -925,12 +925,10 @@ int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw,
err = hw->soc->bias_set(hw, desc, pullup); err = hw->soc->bias_set(hw, desc, pullup);
if (err) if (err)
return err; return err;
} else if (hw->soc->bias_set_combo) {
err = hw->soc->bias_set_combo(hw, desc, pullup, arg);
if (err)
return err;
} else { } else {
return -ENOTSUPP; err = mtk_pinconf_bias_set_rev1(hw, desc, pullup);
if (err)
err = mtk_pinconf_bias_set(hw, desc, pullup);
} }
} }


@ -444,8 +444,7 @@ static int amd_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
unsigned long flags; unsigned long flags;
struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc); struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
u32 wake_mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) | u32 wake_mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3);
BIT(WAKE_CNTRL_OFF_S4);
raw_spin_lock_irqsave(&gpio_dev->lock, flags); raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + (d->hwirq)*4); pin_reg = readl(gpio_dev->base + (d->hwirq)*4);


@ -950,23 +950,37 @@ static int k210_fpioa_probe(struct platform_device *pdev)
return ret; return ret;
pdata->pclk = devm_clk_get_optional(dev, "pclk"); pdata->pclk = devm_clk_get_optional(dev, "pclk");
if (!IS_ERR(pdata->pclk)) if (!IS_ERR(pdata->pclk)) {
clk_prepare_enable(pdata->pclk); ret = clk_prepare_enable(pdata->pclk);
if (ret)
goto disable_clk;
}
pdata->sysctl_map = pdata->sysctl_map =
syscon_regmap_lookup_by_phandle_args(np, syscon_regmap_lookup_by_phandle_args(np,
"canaan,k210-sysctl-power", "canaan,k210-sysctl-power",
1, &pdata->power_offset); 1, &pdata->power_offset);
if (IS_ERR(pdata->sysctl_map)) if (IS_ERR(pdata->sysctl_map)) {
return PTR_ERR(pdata->sysctl_map); ret = PTR_ERR(pdata->sysctl_map);
goto disable_pclk;
}
k210_fpioa_init_ties(pdata); k210_fpioa_init_ties(pdata);
pdata->pctl = pinctrl_register(&k210_pinctrl_desc, dev, (void *)pdata); pdata->pctl = pinctrl_register(&k210_pinctrl_desc, dev, (void *)pdata);
if (IS_ERR(pdata->pctl)) if (IS_ERR(pdata->pctl)) {
return PTR_ERR(pdata->pctl); ret = PTR_ERR(pdata->pctl);
goto disable_pclk;
}
return 0; return 0;
disable_pclk:
clk_disable_unprepare(pdata->pclk);
disable_clk:
clk_disable_unprepare(pdata->clk);
return ret;
} }
static const struct of_device_id k210_fpioa_dt_ids[] = { static const struct of_device_id k210_fpioa_dt_ids[] = {
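The probe rework in this hunk is the usual acquire-in-order, release-in-reverse pattern built from goto labels: each failure jumps to the label that undoes only what was already brought up. A tiny stand-alone C sketch of the pattern; the enable/disable helpers and resource names are invented, while the driver itself uses clk_prepare_enable() and clk_disable_unprepare().

    #include <stdio.h>

    static int enable(const char *what, int err)   /* err != 0 simulates failure */
    {
            if (err) {
                    printf("failed to enable %s\n", what);
                    return err;
            }
            printf("enabled %s\n", what);
            return 0;
    }

    static void disable(const char *what)
    {
            printf("disabled %s\n", what);
    }

    static int probe(int fail_pclk, int fail_map)
    {
            int ret;

            ret = enable("clk", 0);
            if (ret)
                    return ret;             /* nothing acquired yet, plain return */

            ret = enable("pclk", fail_pclk);
            if (ret)
                    goto disable_clk;       /* pclk never came up, only undo clk */

            ret = enable("sysctl map", fail_map);
            if (ret)
                    goto disable_pclk;      /* undo both, in reverse order */

            return 0;

    disable_pclk:
            disable("pclk");
    disable_clk:
            disable("clk");
            return ret;
    }

    int main(void)
    {
            return probe(0, -1) ? 1 : 0;    /* exercise the deepest error path */
    }

Note that a failed enable jumps past its own disable label, so a clock that never came up is never disabled.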


@ -13,7 +13,7 @@ config PINCTRL_MSM
config PINCTRL_APQ8064 config PINCTRL_APQ8064
tristate "Qualcomm APQ8064 pin controller driver" tristate "Qualcomm APQ8064 pin controller driver"
depends on GPIOLIB && OF depends on OF
depends on PINCTRL_MSM depends on PINCTRL_MSM
help help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@ -21,7 +21,7 @@ config PINCTRL_APQ8064
config PINCTRL_APQ8084 config PINCTRL_APQ8084
tristate "Qualcomm APQ8084 pin controller driver" tristate "Qualcomm APQ8084 pin controller driver"
depends on GPIOLIB && OF depends on OF
depends on PINCTRL_MSM depends on PINCTRL_MSM
help help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@ -29,7 +29,7 @@ config PINCTRL_APQ8084
config PINCTRL_IPQ4019 config PINCTRL_IPQ4019
tristate "Qualcomm IPQ4019 pin controller driver" tristate "Qualcomm IPQ4019 pin controller driver"
depends on GPIOLIB && OF depends on OF
depends on PINCTRL_MSM depends on PINCTRL_MSM
help help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@ -37,7 +37,7 @@ config PINCTRL_IPQ4019
config PINCTRL_IPQ8064 config PINCTRL_IPQ8064
tristate "Qualcomm IPQ8064 pin controller driver" tristate "Qualcomm IPQ8064 pin controller driver"
depends on GPIOLIB && OF depends on OF
depends on PINCTRL_MSM depends on PINCTRL_MSM
help help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@ -45,7 +45,7 @@ config PINCTRL_IPQ8064
config PINCTRL_IPQ8074 config PINCTRL_IPQ8074
tristate "Qualcomm Technologies, Inc. IPQ8074 pin controller driver" tristate "Qualcomm Technologies, Inc. IPQ8074 pin controller driver"
depends on GPIOLIB && OF depends on OF
depends on PINCTRL_MSM depends on PINCTRL_MSM
help help
This is the pinctrl, pinmux, pinconf and gpiolib driver for This is the pinctrl, pinmux, pinconf and gpiolib driver for
@ -55,7 +55,7 @@ config PINCTRL_IPQ8074
config PINCTRL_IPQ6018 config PINCTRL_IPQ6018
tristate "Qualcomm Technologies, Inc. IPQ6018 pin controller driver" tristate "Qualcomm Technologies, Inc. IPQ6018 pin controller driver"
depends on GPIOLIB && OF depends on OF
depends on PINCTRL_MSM depends on PINCTRL_MSM
help help
This is the pinctrl, pinmux, pinconf and gpiolib driver for This is the pinctrl, pinmux, pinconf and gpiolib driver for
@ -65,7 +65,7 @@ config PINCTRL_IPQ6018
config PINCTRL_MSM8226 config PINCTRL_MSM8226
tristate "Qualcomm 8226 pin controller driver" tristate "Qualcomm 8226 pin controller driver"
depends on GPIOLIB && OF depends on OF
depends on PINCTRL_MSM depends on PINCTRL_MSM
help help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@ -74,7 +74,7 @@ config PINCTRL_MSM8226
config PINCTRL_MSM8660 config PINCTRL_MSM8660
tristate "Qualcomm 8660 pin controller driver" tristate "Qualcomm 8660 pin controller driver"
depends on GPIOLIB && OF depends on OF
depends on PINCTRL_MSM depends on PINCTRL_MSM
help help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@ -82,7 +82,7 @@ config PINCTRL_MSM8660
config PINCTRL_MSM8960 config PINCTRL_MSM8960
tristate "Qualcomm 8960 pin controller driver" tristate "Qualcomm 8960 pin controller driver"
depends on GPIOLIB && OF depends on OF
depends on PINCTRL_MSM depends on PINCTRL_MSM
help help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@ -90,7 +90,7 @@ config PINCTRL_MSM8960
config PINCTRL_MDM9615 config PINCTRL_MDM9615
tristate "Qualcomm 9615 pin controller driver" tristate "Qualcomm 9615 pin controller driver"
depends on GPIOLIB && OF depends on OF
depends on PINCTRL_MSM depends on PINCTRL_MSM
help help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@ -98,7 +98,7 @@ config PINCTRL_MDM9615
config PINCTRL_MSM8X74 config PINCTRL_MSM8X74
tristate "Qualcomm 8x74 pin controller driver" tristate "Qualcomm 8x74 pin controller driver"
depends on GPIOLIB && OF depends on OF
depends on PINCTRL_MSM depends on PINCTRL_MSM
help help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@ -106,7 +106,7 @@ config PINCTRL_MSM8X74
config PINCTRL_MSM8916 config PINCTRL_MSM8916
tristate "Qualcomm 8916 pin controller driver" tristate "Qualcomm 8916 pin controller driver"
depends on GPIOLIB && OF depends on OF
depends on PINCTRL_MSM depends on PINCTRL_MSM
help help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@ -114,7 +114,7 @@ config PINCTRL_MSM8916
config PINCTRL_MSM8953 config PINCTRL_MSM8953
tristate "Qualcomm 8953 pin controller driver" tristate "Qualcomm 8953 pin controller driver"
depends on GPIOLIB && OF depends on OF
depends on PINCTRL_MSM depends on PINCTRL_MSM
help help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@ -124,7 +124,7 @@ config PINCTRL_MSM8953
config PINCTRL_MSM8976 config PINCTRL_MSM8976
tristate "Qualcomm 8976 pin controller driver" tristate "Qualcomm 8976 pin controller driver"
depends on GPIOLIB && OF depends on OF
depends on PINCTRL_MSM depends on PINCTRL_MSM
help help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@ -134,7 +134,7 @@ config PINCTRL_MSM8976
config PINCTRL_MSM8994 config PINCTRL_MSM8994
tristate "Qualcomm 8994 pin controller driver" tristate "Qualcomm 8994 pin controller driver"
depends on GPIOLIB && OF depends on OF
depends on PINCTRL_MSM depends on PINCTRL_MSM
help help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@ -143,7 +143,7 @@ config PINCTRL_MSM8994
config PINCTRL_MSM8996 config PINCTRL_MSM8996
tristate "Qualcomm MSM8996 pin controller driver" tristate "Qualcomm MSM8996 pin controller driver"
depends on GPIOLIB && OF depends on OF
depends on PINCTRL_MSM depends on PINCTRL_MSM
help help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@ -151,7 +151,7 @@ config PINCTRL_MSM8996
config PINCTRL_MSM8998 config PINCTRL_MSM8998
tristate "Qualcomm MSM8998 pin controller driver" tristate "Qualcomm MSM8998 pin controller driver"
depends on GPIOLIB && OF depends on OF
depends on PINCTRL_MSM depends on PINCTRL_MSM
help help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@ -159,7 +159,7 @@ config PINCTRL_MSM8998
config PINCTRL_QCS404 config PINCTRL_QCS404
tristate "Qualcomm QCS404 pin controller driver" tristate "Qualcomm QCS404 pin controller driver"
depends on GPIOLIB && OF depends on OF
depends on PINCTRL_MSM depends on PINCTRL_MSM
help help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@ -167,7 +167,7 @@ config PINCTRL_QCS404
config PINCTRL_QDF2XXX config PINCTRL_QDF2XXX
tristate "Qualcomm Technologies QDF2xxx pin controller driver" tristate "Qualcomm Technologies QDF2xxx pin controller driver"
depends on GPIOLIB && ACPI depends on ACPI
depends on PINCTRL_MSM depends on PINCTRL_MSM
help help
This is the GPIO driver for the TLMM block found on the This is the GPIO driver for the TLMM block found on the
@ -175,7 +175,7 @@ config PINCTRL_QDF2XXX
config PINCTRL_QCOM_SPMI_PMIC config PINCTRL_QCOM_SPMI_PMIC
tristate "Qualcomm SPMI PMIC pin controller driver" tristate "Qualcomm SPMI PMIC pin controller driver"
depends on GPIOLIB && OF && SPMI depends on OF && SPMI
select REGMAP_SPMI select REGMAP_SPMI
select PINMUX select PINMUX
select PINCONF select PINCONF
@ -190,7 +190,7 @@ config PINCTRL_QCOM_SPMI_PMIC
config PINCTRL_QCOM_SSBI_PMIC config PINCTRL_QCOM_SSBI_PMIC
tristate "Qualcomm SSBI PMIC pin controller driver" tristate "Qualcomm SSBI PMIC pin controller driver"
depends on GPIOLIB && OF depends on OF
select PINMUX select PINMUX
select PINCONF select PINCONF
select GENERIC_PINCONF select GENERIC_PINCONF
@ -204,7 +204,7 @@ config PINCTRL_QCOM_SSBI_PMIC
config PINCTRL_SC7180 config PINCTRL_SC7180
tristate "Qualcomm Technologies Inc SC7180 pin controller driver" tristate "Qualcomm Technologies Inc SC7180 pin controller driver"
depends on GPIOLIB && OF depends on OF
depends on PINCTRL_MSM depends on PINCTRL_MSM
help help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@ -213,7 +213,7 @@ config PINCTRL_SC7180
config PINCTRL_SC7280 config PINCTRL_SC7280
tristate "Qualcomm Technologies Inc SC7280 pin controller driver" tristate "Qualcomm Technologies Inc SC7280 pin controller driver"
depends on GPIOLIB && OF depends on OF
depends on PINCTRL_MSM depends on PINCTRL_MSM
help help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@ -222,7 +222,7 @@ config PINCTRL_SC7280
config PINCTRL_SC8180X config PINCTRL_SC8180X
tristate "Qualcomm Technologies Inc SC8180x pin controller driver" tristate "Qualcomm Technologies Inc SC8180x pin controller driver"
depends on GPIOLIB && (OF || ACPI) depends on (OF || ACPI)
depends on PINCTRL_MSM depends on PINCTRL_MSM
help help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@ -231,7 +231,7 @@ config PINCTRL_SC8180X
config PINCTRL_SDM660 config PINCTRL_SDM660
tristate "Qualcomm Technologies Inc SDM660 pin controller driver" tristate "Qualcomm Technologies Inc SDM660 pin controller driver"
depends on GPIOLIB && OF depends on OF
depends on PINCTRL_MSM depends on PINCTRL_MSM
help help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@ -240,7 +240,7 @@ config PINCTRL_SDM660
config PINCTRL_SDM845 config PINCTRL_SDM845
tristate "Qualcomm Technologies Inc SDM845 pin controller driver" tristate "Qualcomm Technologies Inc SDM845 pin controller driver"
depends on GPIOLIB && (OF || ACPI) depends on (OF || ACPI)
depends on PINCTRL_MSM depends on PINCTRL_MSM
help help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@ -249,7 +249,7 @@ config PINCTRL_SDM845
config PINCTRL_SDX55 config PINCTRL_SDX55
tristate "Qualcomm Technologies Inc SDX55 pin controller driver" tristate "Qualcomm Technologies Inc SDX55 pin controller driver"
depends on GPIOLIB && OF depends on OF
depends on PINCTRL_MSM depends on PINCTRL_MSM
help help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@ -258,7 +258,7 @@ config PINCTRL_SDX55
config PINCTRL_SM6125 config PINCTRL_SM6125
tristate "Qualcomm Technologies Inc SM6125 pin controller driver" tristate "Qualcomm Technologies Inc SM6125 pin controller driver"
depends on GPIOLIB && OF depends on OF
depends on PINCTRL_MSM depends on PINCTRL_MSM
help help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@ -267,7 +267,7 @@ config PINCTRL_SM6125
config PINCTRL_SM8150 config PINCTRL_SM8150
tristate "Qualcomm Technologies Inc SM8150 pin controller driver" tristate "Qualcomm Technologies Inc SM8150 pin controller driver"
depends on GPIOLIB && OF depends on OF
depends on PINCTRL_MSM depends on PINCTRL_MSM
help help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@ -276,7 +276,7 @@ config PINCTRL_SM8150
config PINCTRL_SM8250 config PINCTRL_SM8250
tristate "Qualcomm Technologies Inc SM8250 pin controller driver" tristate "Qualcomm Technologies Inc SM8250 pin controller driver"
depends on GPIOLIB && OF depends on OF
depends on PINCTRL_MSM depends on PINCTRL_MSM
help help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@ -285,8 +285,7 @@ config PINCTRL_SM8250
config PINCTRL_SM8350 config PINCTRL_SM8350
tristate "Qualcomm Technologies Inc SM8350 pin controller driver" tristate "Qualcomm Technologies Inc SM8350 pin controller driver"
depends on GPIOLIB && OF depends on PINCTRL_MSM
select PINCTRL_MSM
help help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm Technologies Inc TLMM block found on the Qualcomm Qualcomm Technologies Inc TLMM block found on the Qualcomm


@ -1219,10 +1219,12 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev)
} }
/* /*
* We suppose that we won't have any more functions than pins, * Find an upper bound for the maximum number of functions: in
* we'll reallocate that later anyway * the worst case we have gpio_in, gpio_out, irq and up to four
* special functions per pin, plus one entry for the sentinel.
* We'll reallocate that later anyway.
*/ */
pctl->functions = kcalloc(pctl->ngroups, pctl->functions = kcalloc(4 * pctl->ngroups + 4,
sizeof(*pctl->functions), sizeof(*pctl->functions),
GFP_KERNEL); GFP_KERNEL);
if (!pctl->functions) if (!pctl->functions)
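One way to read the new bound, consistent with the comment and the kcalloc() size: the three selections shared by every pin (gpio_in, gpio_out, irq) plus the terminating sentinel account for the constant 4, and each pin group can contribute at most four distinct special functions, so a controller with, say, 94 pin groups gets room for 4 * 94 + 4 = 380 entries before the later reallocation trims the array; the old allocation of only ngroups entries could be too small whenever pins carried several unique functions.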


@ -41,6 +41,10 @@ static int wapf = -1;
module_param(wapf, uint, 0444); module_param(wapf, uint, 0444);
MODULE_PARM_DESC(wapf, "WAPF value"); MODULE_PARM_DESC(wapf, "WAPF value");
static int tablet_mode_sw = -1;
module_param(tablet_mode_sw, uint, 0444);
MODULE_PARM_DESC(tablet_mode_sw, "Tablet mode detect: -1:auto 0:disable 1:kbd-dock 2:lid-flip");
static struct quirk_entry *quirks; static struct quirk_entry *quirks;
static bool asus_q500a_i8042_filter(unsigned char data, unsigned char str, static bool asus_q500a_i8042_filter(unsigned char data, unsigned char str,
@ -458,6 +462,15 @@ static const struct dmi_system_id asus_quirks[] = {
}, },
.driver_data = &quirk_asus_use_lid_flip_devid, .driver_data = &quirk_asus_use_lid_flip_devid,
}, },
{
.callback = dmi_matched,
.ident = "ASUS TP200s / E205SA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "E205SA"),
},
.driver_data = &quirk_asus_use_lid_flip_devid,
},
{}, {},
}; };
@ -477,6 +490,21 @@ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
else else
wapf = quirks->wapf; wapf = quirks->wapf;
switch (tablet_mode_sw) {
case 0:
quirks->use_kbd_dock_devid = false;
quirks->use_lid_flip_devid = false;
break;
case 1:
quirks->use_kbd_dock_devid = true;
quirks->use_lid_flip_devid = false;
break;
case 2:
quirks->use_kbd_dock_devid = false;
quirks->use_lid_flip_devid = true;
break;
}
if (quirks->i8042_filter) { if (quirks->i8042_filter) {
ret = i8042_install_filter(quirks->i8042_filter); ret = i8042_install_filter(quirks->i8042_filter);
if (ret) { if (ret) {
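Usage note for the new knob (the parameter name and its values come from this hunk; the module name asus-nb-wmi is assumed from mainline and is not shown here): loading the module with tablet_mode_sw=2, or booting with asus_nb_wmi.tablet_mode_sw=2 on the kernel command line, forces the lid-flip tablet-mode handling on regardless of the DMI quirk table, 1 selects the keyboard-dock method, 0 disables both, and the default of -1 keeps the automatic per-model behaviour.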

Some files were not shown because too many files have changed in this diff.