Merge 6.2-rc8 into usb-next

We need the USB fixes in here for testing.
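For reference, a back-merge like this is normally produced by merging the
mainline -rc tag into the subsystem branch. A minimal sketch, assuming the
usual tag and branch names rather than anything recorded in this commit:

    git checkout usb-next
    git merge v6.2-rc8   # brings the 6.2-rc8 fixes, USB ones included, into usb-next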

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman 2023-02-14 13:44:08 +01:00
commit c4a07e264d
190 changed files with 1669 additions and 860 deletions


@ -2,3 +2,8 @@
*.example.dts
/processed-schema*.yaml
/processed-schema*.json
#
# We don't want to ignore the following even if they are dot-files
#
!.yamllint


@ -108,7 +108,7 @@ properties:
msi-controller:
description:
Only present if the Message Based Interrupt functionnality is
Only present if the Message Based Interrupt functionality is
being exposed by the HW, and the mbi-ranges property present.
mbi-ranges:


@ -16,5 +16,5 @@ Contents
Support
=======
If you got any problem, contact Wangxun support team via support@trustnetic.com
If you got any problem, contact Wangxun support team via nic-support@net-swift.com
and Cc: netdev.


@ -16120,7 +16120,7 @@ F: drivers/pci/controller/pci-v3-semi.c
PCI ENDPOINT SUBSYSTEM
M: Lorenzo Pieralisi <lpieralisi@kernel.org>
R: Krzysztof Wilczyński <kw@linux.com>
M: Krzysztof Wilczyński <kw@linux.com>
R: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
R: Kishon Vijay Abraham I <kishon@kernel.org>
L: linux-pci@vger.kernel.org
@ -16128,7 +16128,7 @@ S: Supported
Q: https://patchwork.kernel.org/project/linux-pci/list/
B: https://bugzilla.kernel.org
C: irc://irc.oftc.net/linux-pci
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lpieralisi/pci.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git
F: Documentation/PCI/endpoint/*
F: Documentation/misc-devices/pci-endpoint-test.rst
F: drivers/misc/pci_endpoint_test.c
@ -16163,7 +16163,7 @@ S: Supported
Q: https://patchwork.kernel.org/project/linux-pci/list/
B: https://bugzilla.kernel.org
C: irc://irc.oftc.net/linux-pci
T: git git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git
F: Documentation/driver-api/pci/p2pdma.rst
F: drivers/pci/p2pdma.c
F: include/linux/pci-p2pdma.h
@ -16185,14 +16185,14 @@ F: drivers/pci/controller/pci-xgene-msi.c
PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS
M: Lorenzo Pieralisi <lpieralisi@kernel.org>
M: Krzysztof Wilczyński <kw@linux.com>
R: Rob Herring <robh@kernel.org>
R: Krzysztof Wilczyński <kw@linux.com>
L: linux-pci@vger.kernel.org
S: Supported
Q: https://patchwork.kernel.org/project/linux-pci/list/
B: https://bugzilla.kernel.org
C: irc://irc.oftc.net/linux-pci
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lpieralisi/pci.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git
F: Documentation/devicetree/bindings/pci/
F: drivers/pci/controller/
F: drivers/pci/pci-bridge-emul.c
@ -16205,7 +16205,7 @@ S: Supported
Q: https://patchwork.kernel.org/project/linux-pci/list/
B: https://bugzilla.kernel.org
C: irc://irc.oftc.net/linux-pci
T: git git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git
F: Documentation/PCI/
F: Documentation/devicetree/bindings/pci/
F: arch/x86/kernel/early-quirks.c
@ -20098,6 +20098,7 @@ F: drivers/watchdog/sunplus_wdt.c
SUPERH
M: Yoshinori Sato <ysato@users.sourceforge.jp>
M: Rich Felker <dalias@libc.org>
M: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
L: linux-sh@vger.kernel.org
S: Maintained
Q: http://patchwork.kernel.org/project/linux-sh/list/


@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 2
SUBLEVEL = 0
EXTRAVERSION = -rc7
EXTRAVERSION = -rc8
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*
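
As a quick check, the version string assembled from these fields is what
"make kernelversion" prints at the top of the tree; with this hunk applied
it reads:

    $ make kernelversion
    6.2.0-rc8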


@ -1181,6 +1181,7 @@
clock-names = "dp", "pclk";
phys = <&edp_phy>;
phy-names = "dp";
power-domains = <&power RK3288_PD_VIO>;
resets = <&cru SRST_EDP>;
reset-names = "dp";
rockchip,grf = <&grf>;


@ -178,7 +178,7 @@
tsin-num = <0>;
serial-not-parallel;
i2c-bus = <&ssc2>;
reset-gpios = <&pio15 4 GPIO_ACTIVE_HIGH>;
reset-gpios = <&pio15 4 GPIO_ACTIVE_LOW>;
dvb-card = <STV0367_TDA18212_NIMA_1>;
};
};


@ -1886,7 +1886,7 @@
sd_emmc_b: sd@5000 {
compatible = "amlogic,meson-axg-mmc";
reg = <0x0 0x5000 0x0 0x800>;
interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>;
interrupts = <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
clocks = <&clkc CLKID_SD_EMMC_B>,
<&clkc CLKID_SD_EMMC_B_CLK0>,
@ -1898,7 +1898,7 @@
sd_emmc_c: mmc@7000 {
compatible = "amlogic,meson-axg-mmc";
reg = <0x0 0x7000 0x0 0x800>;
interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>;
interrupts = <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
clocks = <&clkc CLKID_SD_EMMC_C>,
<&clkc CLKID_SD_EMMC_C_CLK0>,


@ -2324,7 +2324,7 @@
sd_emmc_a: sd@ffe03000 {
compatible = "amlogic,meson-axg-mmc";
reg = <0x0 0xffe03000 0x0 0x800>;
interrupts = <GIC_SPI 189 IRQ_TYPE_EDGE_RISING>;
interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
clocks = <&clkc CLKID_SD_EMMC_A>,
<&clkc CLKID_SD_EMMC_A_CLK0>,
@ -2336,7 +2336,7 @@
sd_emmc_b: sd@ffe05000 {
compatible = "amlogic,meson-axg-mmc";
reg = <0x0 0xffe05000 0x0 0x800>;
interrupts = <GIC_SPI 190 IRQ_TYPE_EDGE_RISING>;
interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
clocks = <&clkc CLKID_SD_EMMC_B>,
<&clkc CLKID_SD_EMMC_B_CLK0>,
@ -2348,7 +2348,7 @@
sd_emmc_c: mmc@ffe07000 {
compatible = "amlogic,meson-axg-mmc";
reg = <0x0 0xffe07000 0x0 0x800>;
interrupts = <GIC_SPI 191 IRQ_TYPE_EDGE_RISING>;
interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
clocks = <&clkc CLKID_SD_EMMC_C>,
<&clkc CLKID_SD_EMMC_C_CLK0>,


@ -603,21 +603,21 @@
sd_emmc_a: mmc@70000 {
compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
reg = <0x0 0x70000 0x0 0x800>;
interrupts = <GIC_SPI 216 IRQ_TYPE_EDGE_RISING>;
interrupts = <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
sd_emmc_b: mmc@72000 {
compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
reg = <0x0 0x72000 0x0 0x800>;
interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>;
interrupts = <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
sd_emmc_c: mmc@74000 {
compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
reg = <0x0 0x74000 0x0 0x800>;
interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>;
interrupts = <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
};


@ -2146,7 +2146,7 @@
};
vdosys0: syscon@1c01a000 {
compatible = "mediatek,mt8195-mmsys", "syscon";
compatible = "mediatek,mt8195-vdosys0", "mediatek,mt8195-mmsys", "syscon";
reg = <0 0x1c01a000 0 0x1000>;
mboxes = <&gce0 0 CMDQ_THR_PRIO_4>;
#clock-cells = <1>;
@ -2292,7 +2292,7 @@
};
vdosys1: syscon@1c100000 {
compatible = "mediatek,mt8195-mmsys", "syscon";
compatible = "mediatek,mt8195-vdosys1", "syscon";
reg = <0 0x1c100000 0 0x1000>;
#clock-cells = <1>;
};


@ -96,7 +96,6 @@
linux,default-trigger = "heartbeat";
gpios = <&rk805 1 GPIO_ACTIVE_LOW>;
default-state = "on";
mode = <0x23>;
};
user_led: led-1 {
@ -104,7 +103,6 @@
linux,default-trigger = "mmc1";
gpios = <&rk805 0 GPIO_ACTIVE_LOW>;
default-state = "off";
mode = <0x05>;
};
};
};


@ -111,7 +111,7 @@
};
};
dmc_opp_table: dmc_opp_table {
dmc_opp_table: opp-table-3 {
compatible = "operating-points-v2";
opp00 {


@ -104,6 +104,13 @@
};
};
&cpu_alert0 {
temperature = <65000>;
};
&cpu_alert1 {
temperature = <68000>;
};
&cpu_l0 {
cpu-supply = <&vdd_cpu_l>;
};


@ -589,7 +589,7 @@
clocks = <&cru HCLK_M_CRYPTO0>, <&cru HCLK_S_CRYPTO0>, <&cru SCLK_CRYPTO0>;
clock-names = "hclk_master", "hclk_slave", "sclk";
resets = <&cru SRST_CRYPTO0>, <&cru SRST_CRYPTO0_S>, <&cru SRST_CRYPTO0_M>;
reset-names = "master", "lave", "crypto";
reset-names = "master", "slave", "crypto-rst";
};
crypto1: crypto@ff8b8000 {
@ -599,7 +599,7 @@
clocks = <&cru HCLK_M_CRYPTO1>, <&cru HCLK_S_CRYPTO1>, <&cru SCLK_CRYPTO1>;
clock-names = "hclk_master", "hclk_slave", "sclk";
resets = <&cru SRST_CRYPTO1>, <&cru SRST_CRYPTO1_S>, <&cru SRST_CRYPTO1_M>;
reset-names = "master", "slave", "crypto";
reset-names = "master", "slave", "crypto-rst";
};
i2c1: i2c@ff110000 {
@ -2241,13 +2241,11 @@
pcfg_input_pull_up: pcfg-input-pull-up {
input-enable;
bias-pull-up;
drive-strength = <2>;
};
pcfg_input_pull_down: pcfg-input-pull-down {
input-enable;
bias-pull-down;
drive-strength = <2>;
};
clock {


@ -353,6 +353,17 @@
};
};
&pmu_io_domains {
pmuio2-supply = <&vcc_3v3>;
vccio1-supply = <&vcc_3v3>;
vccio3-supply = <&vcc_3v3>;
vccio4-supply = <&vcca_1v8>;
vccio5-supply = <&vcc_3v3>;
vccio6-supply = <&vcca_1v8>;
vccio7-supply = <&vcc_3v3>;
status = "okay";
};
&pwm0 {
status = "okay";
};


@ -571,6 +571,8 @@
};
&i2s1_8ch {
pinctrl-names = "default";
pinctrl-0 = <&i2s1m0_sclktx &i2s1m0_lrcktx &i2s1m0_sdi0 &i2s1m0_sdo0>;
rockchip,trcm-sync-tx-only;
status = "okay";
};
@ -730,14 +732,13 @@
disable-wp;
pinctrl-names = "default";
pinctrl-0 = <&sdmmc0_bus4 &sdmmc0_clk &sdmmc0_cmd &sdmmc0_det>;
sd-uhs-sdr104;
sd-uhs-sdr50;
vmmc-supply = <&vcc3v3_sd>;
vqmmc-supply = <&vccio_sd>;
status = "okay";
};
&sdmmc2 {
supports-sdio;
bus-width = <4>;
disable-wp;
cap-sd-highspeed;


@ -966,6 +966,7 @@
clock-names = "aclk_mst", "aclk_slv",
"aclk_dbi", "pclk", "aux";
device_type = "pci";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0 0 0 1 &pcie_intc 0>,
<0 0 0 2 &pcie_intc 1>,


@ -163,7 +163,6 @@ config PPC
select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WANTS_MODULES_DATA_IN_VMALLOC if PPC_BOOK3S_32 || PPC_8xx
select ARCH_WANTS_NO_INSTR
select ARCH_WEAK_RELEASE_ACQUIRE
select BINFMT_ELF
select BUILDTIME_TABLE_SORT


@ -50,16 +50,18 @@ static inline bool exit_must_hard_disable(void)
*/
static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable)
{
bool must_hard_disable = (exit_must_hard_disable() || !restartable);
/* This must be done with RI=1 because tracing may touch vmaps */
trace_hardirqs_on();
if (exit_must_hard_disable() || !restartable)
if (must_hard_disable)
__hard_EE_RI_disable();
#ifdef CONFIG_PPC64
/* This pattern matches prep_irq_for_idle */
if (unlikely(lazy_irq_pending_nocheck())) {
if (exit_must_hard_disable() || !restartable) {
if (must_hard_disable) {
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
__hard_RI_enable();
}


@ -26,6 +26,7 @@
#include <asm/firmware.h>
#include <asm/kexec_ranges.h>
#include <asm/crashdump-ppc64.h>
#include <asm/mmzone.h>
#include <asm/prom.h>
struct umem_info {


@ -721,6 +721,10 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
}
#define pmdp_collapse_flush pmdp_collapse_flush
extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*


@ -65,16 +65,18 @@ static bool __kprobes arch_check_kprobe(struct kprobe *p)
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
unsigned long probe_addr = (unsigned long)p->addr;
u16 *insn = (u16 *)p->addr;
if (probe_addr & 0x1)
if ((unsigned long)insn & 0x1)
return -EILSEQ;
if (!arch_check_kprobe(p))
return -EILSEQ;
/* copy instruction */
p->opcode = *p->addr;
p->opcode = (kprobe_opcode_t)(*insn++);
if (GET_INSN_LENGTH(p->opcode) == 4)
p->opcode |= (kprobe_opcode_t)(*insn) << 16;
/* decode instruction */
switch (riscv_probe_decode_insn(p->addr, &p->ainsn.api)) {


@ -32,6 +32,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
fp = (unsigned long)__builtin_frame_address(0);
sp = current_stack_pointer;
pc = (unsigned long)walk_stackframe;
level = -1;
} else {
/* task blocked in __switch_to */
fp = task->thread.s[0];
@ -43,7 +44,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
unsigned long low, high;
struct stackframe *frame;
if (unlikely(!__kernel_text_address(pc) || (level++ >= 1 && !fn(arg, pc))))
if (unlikely(!__kernel_text_address(pc) || (level++ >= 0 && !fn(arg, pc))))
break;
/* Validate frame pointer */


@ -90,8 +90,10 @@ void flush_icache_pte(pte_t pte)
if (PageHuge(page))
page = compound_head(page);
if (!test_and_set_bit(PG_dcache_clean, &page->flags))
if (!test_bit(PG_dcache_clean, &page->flags)) {
flush_icache_all();
set_bit(PG_dcache_clean, &page->flags);
}
}
#endif /* CONFIG_MMU */


@ -81,3 +81,23 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
}
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
{
pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
VM_BUG_ON(pmd_trans_huge(*pmdp));
/*
* When leaf PTE entries (regular pages) are collapsed into a leaf
* PMD entry (huge page), a valid non-leaf PTE is converted into a
* valid leaf PTE at the level 1 page table. Since the sfence.vma
* forms that specify an address only apply to leaf PTEs, we need a
* global flush here. collapse_huge_page() assumes these flushes are
* eager, so just do the fence here.
*/
flush_tlb_mm(vma->vm_mm);
return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */


@ -123,6 +123,8 @@
#define INTEL_FAM6_METEORLAKE 0xAC
#define INTEL_FAM6_METEORLAKE_L 0xAA
#define INTEL_FAM6_LUNARLAKE_M 0xBD
/* "Small Core" Processors (Atom/E-Core) */
#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */


@ -625,7 +625,7 @@ static int prepare_emulation(struct kprobe *p, struct insn *insn)
/* 1 byte conditional jump */
p->ainsn.emulate_op = kprobe_emulate_jcc;
p->ainsn.jcc.type = opcode & 0xf;
p->ainsn.rel32 = *(char *)insn->immediate.bytes;
p->ainsn.rel32 = insn->immediate.value;
break;
case 0x0f:
opcode = insn->opcode.bytes[1];


@ -3297,8 +3297,8 @@ void acpi_nfit_shutdown(void *data)
mutex_lock(&acpi_desc->init_mutex);
set_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
cancel_delayed_work_sync(&acpi_desc->dwork);
mutex_unlock(&acpi_desc->init_mutex);
cancel_delayed_work_sync(&acpi_desc->dwork);
/*
* Bounce the nvdimm bus lock to make sure any in-flight


@ -58,7 +58,7 @@ jz4760_cgu_calc_m_n_od(const struct ingenic_cgu_pll_info *pll_info,
unsigned long rate, unsigned long parent_rate,
unsigned int *pm, unsigned int *pn, unsigned int *pod)
{
unsigned int m, n, od, m_max = (1 << pll_info->m_bits) - 2;
unsigned int m, n, od, m_max = (1 << pll_info->m_bits) - 1;
/* The frequency after the N divider must be between 1 and 50 MHz. */
n = parent_rate / (1 * MHZ);
@ -66,19 +66,17 @@ jz4760_cgu_calc_m_n_od(const struct ingenic_cgu_pll_info *pll_info,
/* The N divider must be >= 2. */
n = clamp_val(n, 2, 1 << pll_info->n_bits);
for (;; n >>= 1) {
od = (unsigned int)-1;
rate /= MHZ;
parent_rate /= MHZ;
do {
m = (rate / MHZ) * (1 << ++od) * n / (parent_rate / MHZ);
} while ((m > m_max || m & 1) && (od < 4));
if (od < 4 && m >= 4 && m <= m_max)
break;
for (m = m_max; m >= m_max && n >= 2; n--) {
m = rate * n / parent_rate;
od = m & 1;
m <<= od;
}
*pm = m;
*pn = n;
*pn = n + 1;
*pod = 1 << od;
}


@ -164,12 +164,11 @@ static int mpfs_ccc_register_outputs(struct device *dev, struct mpfs_ccc_out_hw_
for (unsigned int i = 0; i < num_clks; i++) {
struct mpfs_ccc_out_hw_clock *out_hw = &out_hws[i];
char *name = devm_kzalloc(dev, 23, GFP_KERNEL);
char *name = devm_kasprintf(dev, GFP_KERNEL, "%s_out%u", parent->name, i);
if (!name)
return -ENOMEM;
snprintf(name, 23, "%s_out%u", parent->name, i);
out_hw->divider.hw.init = CLK_HW_INIT_HW(name, &parent->hw, &clk_divider_ops, 0);
out_hw->divider.reg = data->pll_base[i / MPFS_CCC_OUTPUTS_PER_PLL] +
out_hw->reg_offset;
@ -201,14 +200,13 @@ static int mpfs_ccc_register_plls(struct device *dev, struct mpfs_ccc_pll_hw_clo
for (unsigned int i = 0; i < num_clks; i++) {
struct mpfs_ccc_pll_hw_clock *pll_hw = &pll_hws[i];
char *name = devm_kzalloc(dev, 18, GFP_KERNEL);
if (!name)
pll_hw->name = devm_kasprintf(dev, GFP_KERNEL, "ccc%s_pll%u",
strchrnul(dev->of_node->full_name, '@'), i);
if (!pll_hw->name)
return -ENOMEM;
pll_hw->base = data->pll_base[i];
snprintf(name, 18, "ccc%s_pll%u", strchrnul(dev->of_node->full_name, '@'), i);
pll_hw->name = (const char *)name;
pll_hw->hw.init = CLK_HW_INIT_PARENTS_DATA_FIXED_SIZE(pll_hw->name,
pll_hw->parents,
&mpfs_ccc_pll_ops, 0);


@ -143,21 +143,6 @@ static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
return lval * xo_rate;
}
/* Get the current frequency of the CPU (after throttling) */
static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
{
struct qcom_cpufreq_data *data;
struct cpufreq_policy *policy;
policy = cpufreq_cpu_get_raw(cpu);
if (!policy)
return 0;
data = policy->driver_data;
return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
}
/* Get the frequency requested by the cpufreq core for the CPU */
static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
{
@ -179,6 +164,23 @@ static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
return policy->freq_table[index].frequency;
}
static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
{
struct qcom_cpufreq_data *data;
struct cpufreq_policy *policy;
policy = cpufreq_cpu_get_raw(cpu);
if (!policy)
return 0;
data = policy->driver_data;
if (data->throttle_irq >= 0)
return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
return qcom_cpufreq_get_freq(cpu);
}
static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{


@ -131,7 +131,7 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct cxl_port *iter = cxled_to_port(cxled);
struct cxl_ep *ep;
int rc;
int rc = 0;
while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
iter = to_cxl_port(iter->dev.parent);
@ -143,7 +143,8 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
cxl_rr = cxl_rr_load(iter, cxlr);
cxld = cxl_rr->decoder;
rc = cxld->reset(cxld);
if (cxld->reset)
rc = cxld->reset(cxld);
if (rc)
return rc;
}
@ -186,7 +187,8 @@ static int cxl_region_decode_commit(struct cxl_region *cxlr)
iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
cxl_rr = cxl_rr_load(iter, cxlr);
cxld = cxl_rr->decoder;
cxld->reset(cxld);
if (cxld->reset)
cxld->reset(cxld);
}
cxled->cxld.reset(&cxled->cxld);
@ -991,10 +993,10 @@ static int cxl_port_setup_targets(struct cxl_port *port,
int i, distance;
/*
* Passthrough ports impose no distance requirements between
* Passthrough decoders impose no distance requirements between
* peers
*/
if (port->nr_dports == 1)
if (cxl_rr->nr_targets == 1)
distance = 0;
else
distance = p->nr_targets / cxl_rr->nr_targets;


@ -475,7 +475,7 @@ EXPORT_SYMBOL_GPL(put_dax);
/**
* dax_holder() - obtain the holder of a dax device
* @dax_dev: a dax_device instance
*
* Return: the holder's data which represents the holder if registered,
* otherwize NULL.
*/


@ -19,10 +19,13 @@ static bool system_needs_vamap(void)
const u8 *type1_family = efi_get_smbios_string(1, family);
/*
* Ampere Altra machines crash in SetTime() if SetVirtualAddressMap()
* has not been called prior.
* Ampere eMAG, Altra, and Altra Max machines crash in SetTime() if
* SetVirtualAddressMap() has not been called prior.
*/
if (!type1_family || strcmp(type1_family, "Altra"))
if (!type1_family || (
strcmp(type1_family, "eMAG") &&
strcmp(type1_family, "Altra") &&
strcmp(type1_family, "Altra Max")))
return false;
efi_warn("Working around broken SetVirtualAddressMap()\n");


@ -243,6 +243,7 @@ extern int amdgpu_num_kcq;
#define AMDGPU_VCNFW_LOG_SIZE (32 * 1024)
extern int amdgpu_vcnfw_log;
extern int amdgpu_sg_display;
#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)


@ -1220,10 +1220,13 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
* next job actually sees the results from the previous one
* before we start executing on the same scheduler ring.
*/
if (!s_fence || s_fence->sched != sched)
if (!s_fence || s_fence->sched != sched) {
dma_fence_put(fence);
continue;
}
r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
dma_fence_put(fence);
if (r)
return r;
}


@ -186,6 +186,7 @@ int amdgpu_num_kcq = -1;
int amdgpu_smartshift_bias;
int amdgpu_use_xgmi_p2p = 1;
int amdgpu_vcnfw_log;
int amdgpu_sg_display = -1; /* auto */
static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
@ -931,6 +932,16 @@ module_param_named(num_kcq, amdgpu_num_kcq, int, 0444);
MODULE_PARM_DESC(vcnfw_log, "Enable vcnfw log(0 = disable (default value), 1 = enable)");
module_param_named(vcnfw_log, amdgpu_vcnfw_log, int, 0444);
/**
* DOC: sg_display (int)
* Disable S/G (scatter/gather) display (i.e., display from system memory).
* This option is only relevant on APUs. Set this option to 0 to disable
* S/G display if you experience flickering or other issues under memory
* pressure and report the issue.
*/
MODULE_PARM_DESC(sg_display, "S/G Display (-1 = auto (default), 0 = disable)");
module_param_named(sg_display, amdgpu_sg_display, int, 0444);
/**
* DOC: smu_pptable_id (int)
* Used to override pptable id. id = 0 use VBIOS pptable.


@ -618,7 +618,13 @@ void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
if (!ring || !ring->fence_drv.initialized)
continue;
if (!ring->no_scheduler)
/*
* Notice we check for sched.ops since there's some
* override on the meaning of sched.ready by amdgpu.
* The natural check would be sched.ready, which is
* set as drm_sched_init() finishes...
*/
if (ring->sched.ops)
drm_sched_fini(&ring->sched);
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)


@ -295,7 +295,7 @@ struct amdgpu_ring {
#define amdgpu_ring_parse_cs(r, p, job, ib) ((r)->funcs->parse_cs((p), (job), (ib)))
#define amdgpu_ring_patch_cs_in_place(r, p, job, ib) ((r)->funcs->patch_cs_in_place((p), (job), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
#define amdgpu_ring_test_ib(r, t) ((r)->funcs->test_ib ? (r)->funcs->test_ib((r), (t)) : 0)
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))


@ -974,7 +974,7 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
min(nptes, 32u), dst, incr,
upd_flags,
vm->task_info.pid,
vm->task_info.tgid,
vm->immediate.fence_context);
amdgpu_vm_pte_update_flags(params, to_amdgpu_bo_vm(pt),
cursor.level, pe_start, dst,


@ -6877,7 +6877,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
.test_ring = gfx_v9_0_ring_test_ring,
.test_ib = gfx_v9_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_switch_buffer = gfx_v9_ring_emit_sb,


@ -641,7 +641,9 @@ static int soc21_common_early_init(void *handle)
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_REPEATER_FGCG |
AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_HDP_SD;
AMD_CG_SUPPORT_HDP_SD |
AMD_CG_SUPPORT_ATHUB_MGCG |
AMD_CG_SUPPORT_ATHUB_LS;
adev->pg_flags = AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG;


@ -1184,24 +1184,38 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
memset(pa_config, 0, sizeof(*pa_config));
logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
/*
* Raven2 has a HW issue that it is unable to use the vram which
* is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
* workaround that increase system aperture high address (add 1)
* to get rid of the VM fault and hardware hang.
*/
logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
else
logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
agp_base = 0;
agp_bot = adev->gmc.agp_start >> 24;
agp_top = adev->gmc.agp_end >> 24;
/* AGP aperture is disabled */
if (agp_bot == agp_top) {
logical_addr_low = adev->gmc.vram_start >> 18;
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
/*
* Raven2 has a HW issue that it is unable to use the vram which
* is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
* workaround that increase system aperture high address (add 1)
* to get rid of the VM fault and hardware hang.
*/
logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
else
logical_addr_high = adev->gmc.vram_end >> 18;
} else {
logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
/*
* Raven2 has a HW issue that it is unable to use the vram which
* is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
* workaround that increase system aperture high address (add 1)
* to get rid of the VM fault and hardware hang.
*/
logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
else
logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
}
pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
@ -1503,6 +1517,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
case IP_VERSION(3, 0, 1):
case IP_VERSION(3, 1, 2):
case IP_VERSION(3, 1, 3):
case IP_VERSION(3, 1, 4):
case IP_VERSION(3, 1, 5):
case IP_VERSION(3, 1, 6):
init_data.flags.gpu_vm_support = true;
break;
@ -1511,6 +1527,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
break;
}
if (init_data.flags.gpu_vm_support &&
(amdgpu_sg_display == 0))
init_data.flags.gpu_vm_support = false;
if (init_data.flags.gpu_vm_support)
adev->mode_info.gpu_vm_support = true;


@ -3626,7 +3626,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
(int)hubp->curs_attr.width || pos_cpy.x
<= (int)hubp->curs_attr.width +
pipe_ctx->plane_state->src_rect.x) {
pos_cpy.x = temp_x + viewport_width;
pos_cpy.x = 2 * viewport_width - temp_x;
}
}
} else {


@ -1991,6 +1991,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
case IP_VERSION(9, 4, 2):
case IP_VERSION(10, 3, 0):
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
*states = ATTR_STATE_SUPPORTED;
break;
default:


@ -123,7 +123,8 @@
(1 << FEATURE_DS_FCLK_BIT) | \
(1 << FEATURE_DS_LCLK_BIT) | \
(1 << FEATURE_DS_DCFCLK_BIT) | \
(1 << FEATURE_DS_UCLK_BIT))
(1 << FEATURE_DS_UCLK_BIT) | \
(1ULL << FEATURE_DS_VCN_BIT))
//For use with feature control messages
typedef enum {
@ -522,9 +523,9 @@ typedef enum {
TEMP_HOTSPOT_M,
TEMP_MEM,
TEMP_VR_GFX,
TEMP_VR_SOC,
TEMP_VR_MEM0,
TEMP_VR_MEM1,
TEMP_VR_SOC,
TEMP_VR_U,
TEMP_LIQUID0,
TEMP_LIQUID1,


@ -113,20 +113,21 @@
#define NUM_FEATURES 64
#define ALLOWED_FEATURE_CTRL_DEFAULT 0xFFFFFFFFFFFFFFFFULL
#define ALLOWED_FEATURE_CTRL_SCPM (1 << FEATURE_DPM_GFXCLK_BIT) | \
(1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \
(1 << FEATURE_DPM_UCLK_BIT) | \
(1 << FEATURE_DPM_FCLK_BIT) | \
(1 << FEATURE_DPM_SOCCLK_BIT) | \
(1 << FEATURE_DPM_MP0CLK_BIT) | \
(1 << FEATURE_DPM_LINK_BIT) | \
(1 << FEATURE_DPM_DCN_BIT) | \
(1 << FEATURE_DS_GFXCLK_BIT) | \
(1 << FEATURE_DS_SOCCLK_BIT) | \
(1 << FEATURE_DS_FCLK_BIT) | \
(1 << FEATURE_DS_LCLK_BIT) | \
(1 << FEATURE_DS_DCFCLK_BIT) | \
(1 << FEATURE_DS_UCLK_BIT)
#define ALLOWED_FEATURE_CTRL_SCPM ((1 << FEATURE_DPM_GFXCLK_BIT) | \
(1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \
(1 << FEATURE_DPM_UCLK_BIT) | \
(1 << FEATURE_DPM_FCLK_BIT) | \
(1 << FEATURE_DPM_SOCCLK_BIT) | \
(1 << FEATURE_DPM_MP0CLK_BIT) | \
(1 << FEATURE_DPM_LINK_BIT) | \
(1 << FEATURE_DPM_DCN_BIT) | \
(1 << FEATURE_DS_GFXCLK_BIT) | \
(1 << FEATURE_DS_SOCCLK_BIT) | \
(1 << FEATURE_DS_FCLK_BIT) | \
(1 << FEATURE_DS_LCLK_BIT) | \
(1 << FEATURE_DS_DCFCLK_BIT) | \
(1 << FEATURE_DS_UCLK_BIT) | \
(1ULL << FEATURE_DS_VCN_BIT))
//For use with feature control messages
typedef enum {


@ -28,11 +28,11 @@
#define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x34
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x37
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x35
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x37
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_10 0x1D
#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms


@ -407,6 +407,9 @@ static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
if (amdgpu_sriov_vf(smu->adev))
return 0;
ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
&smu_table->power_play_table,
&smu_table->power_play_table_size);
@ -1257,6 +1260,9 @@ static int smu_v13_0_0_get_thermal_temperature_range(struct smu_context *smu,
table_context->power_play_table;
PPTable_t *pptable = smu->smu_table.driver_pptable;
if (amdgpu_sriov_vf(smu->adev))
return 0;
if (!range)
return -EINVAL;


@ -124,6 +124,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
};
static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {


@ -233,21 +233,17 @@ void drm_client_dev_restore(struct drm_device *dev)
static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
{
struct drm_device *dev = buffer->client->dev;
if (buffer->gem) {
drm_gem_vunmap_unlocked(buffer->gem, &buffer->map);
drm_gem_object_put(buffer->gem);
}
if (buffer->handle)
drm_mode_destroy_dumb(dev, buffer->handle, buffer->client->file);
kfree(buffer);
}
static struct drm_client_buffer *
drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height,
u32 format, u32 *handle)
{
const struct drm_format_info *info = drm_format_info(format);
struct drm_mode_create_dumb dumb_args = { };
@ -269,16 +265,15 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
if (ret)
goto err_delete;
buffer->handle = dumb_args.handle;
buffer->pitch = dumb_args.pitch;
obj = drm_gem_object_lookup(client->file, dumb_args.handle);
if (!obj) {
ret = -ENOENT;
goto err_delete;
}
buffer->pitch = dumb_args.pitch;
buffer->gem = obj;
*handle = dumb_args.handle;
return buffer;
@ -365,7 +360,8 @@ static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer)
}
static int drm_client_buffer_addfb(struct drm_client_buffer *buffer,
u32 width, u32 height, u32 format)
u32 width, u32 height, u32 format,
u32 handle)
{
struct drm_client_dev *client = buffer->client;
struct drm_mode_fb_cmd fb_req = { };
@ -377,7 +373,7 @@ static int drm_client_buffer_addfb(struct drm_client_buffer *buffer,
fb_req.depth = info->depth;
fb_req.width = width;
fb_req.height = height;
fb_req.handle = buffer->handle;
fb_req.handle = handle;
fb_req.pitch = buffer->pitch;
ret = drm_mode_addfb(client->dev, &fb_req, client->file);
@ -414,13 +410,24 @@ struct drm_client_buffer *
drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
{
struct drm_client_buffer *buffer;
u32 handle;
int ret;
buffer = drm_client_buffer_create(client, width, height, format);
buffer = drm_client_buffer_create(client, width, height, format,
&handle);
if (IS_ERR(buffer))
return buffer;
ret = drm_client_buffer_addfb(buffer, width, height, format);
ret = drm_client_buffer_addfb(buffer, width, height, format, handle);
/*
* The handle is only needed for creating the framebuffer, destroy it
* again to solve a circular dependency should anybody export the GEM
* object as DMA-buf. The framebuffer and our buffer structure are still
* holding references to the GEM object to prevent its destruction.
*/
drm_mode_destroy_dumb(client->dev, handle, client->file);
if (ret) {
drm_client_buffer_delete(buffer);
return ERR_PTR(ret);


@ -2466,6 +2466,22 @@ static enum port dvo_port_to_port(struct drm_i915_private *i915,
dvo_port);
}
static enum port
dsi_dvo_port_to_port(struct drm_i915_private *i915, u8 dvo_port)
{
switch (dvo_port) {
case DVO_PORT_MIPIA:
return PORT_A;
case DVO_PORT_MIPIC:
if (DISPLAY_VER(i915) >= 11)
return PORT_B;
else
return PORT_C;
default:
return PORT_NONE;
}
}
static int parse_bdb_230_dp_max_link_rate(const int vbt_max_link_rate)
{
switch (vbt_max_link_rate) {
@ -3414,19 +3430,16 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *i915,
dvo_port = child->dvo_port;
if (dvo_port == DVO_PORT_MIPIA ||
(dvo_port == DVO_PORT_MIPIB && DISPLAY_VER(i915) >= 11) ||
(dvo_port == DVO_PORT_MIPIC && DISPLAY_VER(i915) < 11)) {
if (port)
*port = dvo_port - DVO_PORT_MIPIA;
return true;
} else if (dvo_port == DVO_PORT_MIPIB ||
dvo_port == DVO_PORT_MIPIC ||
dvo_port == DVO_PORT_MIPID) {
if (dsi_dvo_port_to_port(i915, dvo_port) == PORT_NONE) {
drm_dbg_kms(&i915->drm,
"VBT has unsupported DSI port %c\n",
port_name(dvo_port - DVO_PORT_MIPIA));
continue;
}
if (port)
*port = dsi_dvo_port_to_port(i915, dvo_port);
return true;
}
return false;
@ -3511,7 +3524,7 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
continue;
if (child->dvo_port - DVO_PORT_MIPIA == encoder->port) {
if (dsi_dvo_port_to_port(i915, child->dvo_port) == encoder->port) {
if (!devdata->dsc)
return false;


@ -328,8 +328,20 @@ out_unlock:
return ret;
}
static int intelfb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
{
if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
return 0;
if (helper->fb->funcs->dirty)
return helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
return 0;
}
static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.fb_probe = intelfb_create,
.fb_dirty = intelfb_dirty,
};
static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)


@ -1587,7 +1587,8 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
skl_check_wm_level(&wm->wm[level], ddb);
if (icl_need_wm1_wa(i915, plane_id) &&
level == 1 && wm->wm[0].enable) {
level == 1 && !wm->wm[level].enable &&
wm->wm[0].enable) {
wm->wm[level].blocks = wm->wm[0].blocks;
wm->wm[level].lines = wm->wm[0].lines;
wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;


@ -3483,6 +3483,13 @@ err_request:
eb.composite_fence :
&eb.requests[0]->fence);
if (unlikely(eb.gem_context->syncobj)) {
drm_syncobj_replace_fence(eb.gem_context->syncobj,
eb.composite_fence ?
eb.composite_fence :
&eb.requests[0]->fence);
}
if (out_fence) {
if (err == 0) {
fd_install(out_fence_fd, out_fence->file);
@ -3494,13 +3501,6 @@ err_request:
}
}
if (unlikely(eb.gem_context->syncobj)) {
drm_syncobj_replace_fence(eb.gem_context->syncobj,
eb.composite_fence ?
eb.composite_fence :
&eb.requests[0]->fence);
}
if (!out_fence && eb.composite_fence)
dma_fence_put(eb.composite_fence);


@ -579,7 +579,7 @@ static int shmem_object_init(struct intel_memory_region *mem,
mapping_set_gfp_mask(mapping, mask);
GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, 0);
i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, flags);
obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;


@ -126,7 +126,6 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
void __user *user_bo_handles = NULL;
struct virtio_gpu_object_array *buflist = NULL;
struct sync_file *sync_file;
int in_fence_fd = exbuf->fence_fd;
int out_fence_fd = -1;
void *buf;
uint64_t fence_ctx;
@ -152,13 +151,11 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
ring_idx = exbuf->ring_idx;
}
exbuf->fence_fd = -1;
virtio_gpu_create_context(dev, file);
if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
struct dma_fence *in_fence;
in_fence = sync_file_get_fence(in_fence_fd);
in_fence = sync_file_get_fence(exbuf->fence_fd);
if (!in_fence)
return -EINVAL;


@ -227,6 +227,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
cl_data->num_hid_devices = amd_mp2_get_sensor_num(privdata, &cl_data->sensor_idx[0]);
if (cl_data->num_hid_devices == 0)
return -ENODEV;
cl_data->is_any_sensor_enabled = false;
INIT_DELAYED_WORK(&cl_data->work, amd_sfh_work);
INIT_DELAYED_WORK(&cl_data->work_buffer, amd_sfh_work_buffer);
@ -287,6 +288,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
status = amd_sfh_wait_for_response
(privdata, cl_data->sensor_idx[i], SENSOR_ENABLED);
if (status == SENSOR_ENABLED) {
cl_data->is_any_sensor_enabled = true;
cl_data->sensor_sts[i] = SENSOR_ENABLED;
rc = amdtp_hid_probe(cl_data->cur_hid_dev, cl_data);
if (rc) {
@ -301,19 +303,26 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
cl_data->sensor_sts[i]);
goto cleanup;
}
} else {
cl_data->sensor_sts[i] = SENSOR_DISABLED;
dev_dbg(dev, "sid 0x%x (%s) status 0x%x\n",
cl_data->sensor_idx[i],
get_sensor_name(cl_data->sensor_idx[i]),
cl_data->sensor_sts[i]);
}
dev_dbg(dev, "sid 0x%x (%s) status 0x%x\n",
cl_data->sensor_idx[i], get_sensor_name(cl_data->sensor_idx[i]),
cl_data->sensor_sts[i]);
}
if (mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0) {
if (!cl_data->is_any_sensor_enabled ||
(mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0)) {
amd_sfh_hid_client_deinit(privdata);
for (i = 0; i < cl_data->num_hid_devices; i++) {
devm_kfree(dev, cl_data->feature_report[i]);
devm_kfree(dev, in_data->input_report[i]);
devm_kfree(dev, cl_data->report_descr[i]);
}
dev_warn(dev, "Failed to discover, sensors not enabled\n");
dev_warn(dev, "Failed to discover, sensors not enabled is %d\n", cl_data->is_any_sensor_enabled);
return -EOPNOTSUPP;
}
schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));


@ -32,6 +32,7 @@ struct amd_input_data {
struct amdtp_cl_data {
u8 init_done;
u32 cur_hid_dev;
bool is_any_sensor_enabled;
u32 hid_dev_count;
u32 num_hid_devices;
struct device_info *hid_devices;


@ -1202,6 +1202,7 @@ int hid_open_report(struct hid_device *device)
__u8 *end;
__u8 *next;
int ret;
int i;
static int (*dispatch_type[])(struct hid_parser *parser,
struct hid_item *item) = {
hid_parser_main,
@ -1252,6 +1253,8 @@ int hid_open_report(struct hid_device *device)
goto err;
}
device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
for (i = 0; i < HID_DEFAULT_NUM_COLLECTIONS; i++)
device->collection[i].parent_idx = -1;
ret = -EINVAL;
while ((next = fetch_item(start, end, &item)) != NULL) {


@ -12,6 +12,7 @@
* Copyright (c) 2017 Alex Manoussakis <amanou@gnu.org>
* Copyright (c) 2017 Tomasz Kramkowski <tk@the-tk.com>
* Copyright (c) 2020 YOSHIOKA Takuma <lo48576@hard-wi.red>
* Copyright (c) 2022 Takahiro Fujii <fujii@xaxxi.net>
*/
/*
@ -89,7 +90,7 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
case USB_DEVICE_ID_ELECOM_M_DT1URBK:
case USB_DEVICE_ID_ELECOM_M_DT1DRBK:
case USB_DEVICE_ID_ELECOM_M_HT1URBK:
case USB_DEVICE_ID_ELECOM_M_HT1DRBK:
case USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D:
/*
* Report descriptor format:
* 12: button bit count
@ -99,6 +100,16 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
*/
mouse_button_fixup(hdev, rdesc, *rsize, 12, 30, 14, 20, 8);
break;
case USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C:
/*
* Report descriptor format:
* 22: button bit count
* 30: padding bit count
* 24: button report size
* 16: button usage maximum
*/
mouse_button_fixup(hdev, rdesc, *rsize, 22, 30, 24, 16, 8);
break;
}
return rdesc;
}
@ -112,7 +123,8 @@ static const struct hid_device_id elecom_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1URBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1URBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C) },
{ }
};
MODULE_DEVICE_TABLE(hid, elecom_devices);


@ -413,6 +413,8 @@
#define I2C_DEVICE_ID_HP_ENVY_X360_15T_DR100 0x29CF
#define I2C_DEVICE_ID_HP_ENVY_X360_EU0009NV 0x2CF9
#define I2C_DEVICE_ID_HP_SPECTRE_X360_15 0x2817
#define I2C_DEVICE_ID_HP_SPECTRE_X360_13_AW0020NG 0x29DF
#define I2C_DEVICE_ID_ASUS_TP420IA_TOUCHSCREEN 0x2BC8
#define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN 0x2544
#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
#define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A
@ -428,7 +430,8 @@
#define USB_DEVICE_ID_ELECOM_M_DT1URBK 0x00fe
#define USB_DEVICE_ID_ELECOM_M_DT1DRBK 0x00ff
#define USB_DEVICE_ID_ELECOM_M_HT1URBK 0x010c
#define USB_DEVICE_ID_ELECOM_M_HT1DRBK 0x010d
#define USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D 0x010d
#define USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C 0x011c
#define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34
#define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004


@ -370,6 +370,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD),
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_TP420IA_TOUCHSCREEN),
HID_BATTERY_QUIRK_IGNORE },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
HID_BATTERY_QUIRK_IGNORE },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN),
@ -384,6 +386,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15),
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_13_AW0020NG),
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN),
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO2_TOUCHSCREEN),


@ -3978,7 +3978,8 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
}
hidpp_initialize_battery(hidpp);
hidpp_initialize_hires_scroll(hidpp);
if (!hid_is_usb(hidpp->hid_dev))
hidpp_initialize_hires_scroll(hidpp);
/* forward current battery state */
if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP10_BATTERY) {


@ -393,7 +393,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1URBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1URBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C) },
#endif
#if IS_ENABLED(CONFIG_HID_ELO)
{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },


@ -26,8 +26,8 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
if (umem_dmabuf->sgt)
goto wait_fence;
sgt = dma_buf_map_attachment_unlocked(umem_dmabuf->attach,
DMA_BIDIRECTIONAL);
sgt = dma_buf_map_attachment(umem_dmabuf->attach,
DMA_BIDIRECTIONAL);
if (IS_ERR(sgt))
return PTR_ERR(sgt);
@ -103,8 +103,8 @@ void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf)
umem_dmabuf->last_sg_trim = 0;
}
dma_buf_unmap_attachment_unlocked(umem_dmabuf->attach, umem_dmabuf->sgt,
DMA_BIDIRECTIONAL);
dma_buf_unmap_attachment(umem_dmabuf->attach, umem_dmabuf->sgt,
DMA_BIDIRECTIONAL);
umem_dmabuf->sgt = NULL;
}


@ -1318,12 +1318,15 @@ static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
sizeof(tinfo.tidcnt)))
return -EFAULT;
ret = -EFAULT;
addr = arg + offsetof(struct hfi1_tid_info, length);
if (copy_to_user((void __user *)addr, &tinfo.length,
if (!ret && copy_to_user((void __user *)addr, &tinfo.length,
sizeof(tinfo.length)))
ret = -EFAULT;
if (ret)
hfi1_user_exp_rcv_invalid(fd, &tinfo);
}
return ret;


@ -160,16 +160,11 @@ static void unpin_rcv_pages(struct hfi1_filedata *fd,
static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
{
int pinned;
unsigned int npages;
unsigned int npages = tidbuf->npages;
unsigned long vaddr = tidbuf->vaddr;
struct page **pages = NULL;
struct hfi1_devdata *dd = fd->uctxt->dd;
/* Get the number of pages the user buffer spans */
npages = num_user_pages(vaddr, tidbuf->length);
if (!npages)
return -EINVAL;
if (npages > fd->uctxt->expected_count) {
dd_dev_err(dd, "Expected buffer too big\n");
return -EINVAL;
@ -196,7 +191,6 @@ static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
return pinned;
}
tidbuf->pages = pages;
tidbuf->npages = npages;
fd->tid_n_pinned += pinned;
return pinned;
}
@ -274,6 +268,7 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
mutex_init(&tidbuf->cover_mutex);
tidbuf->vaddr = tinfo->vaddr;
tidbuf->length = tinfo->length;
tidbuf->npages = num_user_pages(tidbuf->vaddr, tidbuf->length);
tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
GFP_KERNEL);
if (!tidbuf->psets) {


@ -1722,6 +1722,9 @@ static int irdma_add_mqh_4(struct irdma_device *iwdev,
continue;
idev = in_dev_get(ip_dev);
if (!idev)
continue;
in_dev_for_each_ifa_rtnl(ifa, idev) {
ibdev_dbg(&iwdev->ibdev,
"CM: Allocating child CM Listener forIP=%pI4, vlan_id=%d, MAC=%pM\n",


@ -289,7 +289,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
/* IB ports start with 1, MANA Ethernet ports start with 0 */
port = ucmd.port;
if (ucmd.port > mc->num_ports)
if (port < 1 || port > mc->num_ports)
return -EINVAL;
if (attr->cap.max_send_wr > MAX_SEND_BUFFERS_PER_QUEUE) {


@ -276,8 +276,8 @@ iter_chunk:
size = pa_end - pa_start + PAGE_SIZE;
usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
va_start, &pa_start, size, flags);
err = iommu_map(pd->domain, va_start, pa_start,
size, flags);
err = iommu_map_atomic(pd->domain, va_start,
pa_start, size, flags);
if (err) {
usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
va_start, &pa_start, size, err);
@ -293,8 +293,8 @@ iter_chunk:
size = pa - pa_start + PAGE_SIZE;
usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
va_start, &pa_start, size, flags);
err = iommu_map(pd->domain, va_start, pa_start,
size, flags);
err = iommu_map_atomic(pd->domain, va_start,
pa_start, size, flags);
if (err) {
usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
va_start, &pa_start, size, err);


@ -2200,6 +2200,14 @@ int ipoib_intf_init(struct ib_device *hca, u32 port, const char *name,
rn->attach_mcast = ipoib_mcast_attach;
rn->detach_mcast = ipoib_mcast_detach;
rn->hca = hca;
rc = netif_set_real_num_tx_queues(dev, 1);
if (rc)
goto out;
rc = netif_set_real_num_rx_queues(dev, 1);
if (rc)
goto out;
}
priv->rn_ops = dev->netdev_ops;


@ -312,9 +312,8 @@ void rtrs_srv_destroy_path_files(struct rtrs_srv_path *srv_path)
if (srv_path->kobj.state_in_sysfs) {
sysfs_remove_group(&srv_path->kobj, &rtrs_srv_path_attr_group);
kobject_del(&srv_path->kobj);
kobject_put(&srv_path->kobj);
rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
}
rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
}


@ -76,7 +76,7 @@ void bond_debug_reregister(struct bonding *bond)
d = debugfs_rename(bonding_debug_root, bond->debug_dir,
bonding_debug_root, bond->dev->name);
if (d) {
if (!IS_ERR(d)) {
bond->debug_dir = d;
} else {
netdev_warn(bond->dev, "failed to reregister, so just unregister old one\n");


@ -1309,14 +1309,26 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
if (!priv->ports[port].pvid)
mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
MT7530_VLAN_ACC_TAGGED);
}
/* Set the port as a user port which is to be able to recognize VID
* from incoming packets before fetching entry within the VLAN table.
*/
mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
VLAN_ATTR(MT7530_VLAN_USER) |
PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
/* Set the port as a user port which is to be able to recognize
* VID from incoming packets before fetching entry within the
* VLAN table.
*/
mt7530_rmw(priv, MT7530_PVC_P(port),
VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
VLAN_ATTR(MT7530_VLAN_USER) |
PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
} else {
/* Also set CPU ports to the "user" VLAN port attribute, to
* allow VLAN classification, but keep the EG_TAG attribute as
* "consistent" (i.o.w. don't change its value) for packets
* received by the switch from the CPU, so that tagged packets
* are forwarded to user ports as tagged, and untagged as
* untagged.
*/
mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
VLAN_ATTR(MT7530_VLAN_USER));
}
}
static void


@ -4627,25 +4627,26 @@ static int init_reset_optional(struct platform_device *pdev)
if (ret)
return dev_err_probe(&pdev->dev, ret,
"failed to init SGMII PHY\n");
}
ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG);
if (!ret) {
u32 pm_info[2];
ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG);
if (!ret) {
u32 pm_info[2];
ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
pm_info, ARRAY_SIZE(pm_info));
if (ret) {
dev_err(&pdev->dev, "Failed to read power management information\n");
goto err_out_phy_exit;
ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
pm_info, ARRAY_SIZE(pm_info));
if (ret) {
dev_err(&pdev->dev, "Failed to read power management information\n");
goto err_out_phy_exit;
}
ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0);
if (ret)
goto err_out_phy_exit;
ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1);
if (ret)
goto err_out_phy_exit;
}
ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0);
if (ret)
goto err_out_phy_exit;
ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1);
if (ret)
goto err_out_phy_exit;
}
/* Fully reset controller at hardware level if mapped in device tree */


@ -5524,7 +5524,7 @@ bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
* returned by the firmware is a 16 bit * value, but is indexed
* by [fls(speed) - 1]
*/
static const u32 ice_aq_to_link_speed[15] = {
static const u32 ice_aq_to_link_speed[] = {
SPEED_10, /* BIT(0) */
SPEED_100,
SPEED_1000,
@ -5536,10 +5536,6 @@ static const u32 ice_aq_to_link_speed[15] = {
SPEED_40000,
SPEED_50000,
SPEED_100000, /* BIT(10) */
0,
0,
0,
0 /* BIT(14) */
};
/**
@ -5550,5 +5546,8 @@ static const u32 ice_aq_to_link_speed[15] = {
*/
u32 ice_get_link_speed(u16 index)
{
if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
return 0;
return ice_aq_to_link_speed[index];
}


@ -5541,7 +5541,7 @@ static int __init ice_module_init(void)
pr_info("%s\n", ice_driver_string);
pr_info("%s\n", ice_copyright);
ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
if (!ice_wq) {
pr_err("Failed to create workqueue\n");
return -ENOMEM;


@ -5420,7 +5420,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
*/
status = ice_add_special_words(rinfo, lkup_exts, ice_is_dvm_ena(hw));
if (status)
goto err_free_lkup_exts;
goto err_unroll;
/* Group match words into recipes using preferred recipe grouping
* criteria.


@ -1681,7 +1681,7 @@ ice_tc_forward_to_queue(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
struct ice_vsi *ch_vsi = NULL;
u16 queue = act->rx_queue;
if (queue > vsi->num_rxq) {
if (queue >= vsi->num_rxq) {
NL_SET_ERR_MSG_MOD(fltr->extack,
"Unable to add filter because specified queue is invalid");
return -EINVAL;


@ -39,7 +39,7 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
}
static const u32 ice_legacy_aq_to_vc_speed[15] = {
static const u32 ice_legacy_aq_to_vc_speed[] = {
VIRTCHNL_LINK_SPEED_100MB, /* BIT(0) */
VIRTCHNL_LINK_SPEED_100MB,
VIRTCHNL_LINK_SPEED_1GB,
@ -51,10 +51,6 @@ static const u32 ice_legacy_aq_to_vc_speed[15] = {
VIRTCHNL_LINK_SPEED_40GB,
VIRTCHNL_LINK_SPEED_40GB,
VIRTCHNL_LINK_SPEED_40GB,
VIRTCHNL_LINK_SPEED_UNKNOWN,
VIRTCHNL_LINK_SPEED_UNKNOWN,
VIRTCHNL_LINK_SPEED_UNKNOWN,
VIRTCHNL_LINK_SPEED_UNKNOWN /* BIT(14) */
};
/**
@ -71,21 +67,20 @@ static const u32 ice_legacy_aq_to_vc_speed[15] = {
*/
u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
{
u32 speed;
/* convert a BIT() value into an array index */
u32 index = fls(link_speed) - 1;
if (adv_link_support) {
/* convert a BIT() value into an array index */
speed = ice_get_link_speed(fls(link_speed) - 1);
} else {
if (adv_link_support)
return ice_get_link_speed(index);
else if (index < ARRAY_SIZE(ice_legacy_aq_to_vc_speed))
/* Virtchnl speeds are not defined for every speed supported in
* the hardware. To maintain compatibility with older AVF
* drivers, while reporting the speed the new speed values are
* resolved to the closest known virtchnl speeds
*/
speed = ice_legacy_aq_to_vc_speed[fls(link_speed) - 1];
}
return ice_legacy_aq_to_vc_speed[index];
return speed;
return VIRTCHNL_LINK_SPEED_UNKNOWN;
}
/* The mailbox overflow detection algorithm helps to check if there


@ -44,13 +44,17 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
/* outer VLAN ops regardless of port VLAN config */
vlan_ops->add_vlan = ice_vsi_add_vlan;
vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
if (ice_vf_is_port_vlan_ena(vf)) {
/* setup outer VLAN ops */
vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
/* all Rx traffic should be in the domain of the
* assigned port VLAN, so prevent disabling Rx VLAN
* filtering
*/
vlan_ops->dis_rx_filtering = noop_vlan;
vlan_ops->ena_rx_filtering =
ice_vsi_ena_rx_vlan_filtering;
@ -63,6 +67,9 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
} else {
vlan_ops->dis_rx_filtering =
ice_vsi_dis_rx_vlan_filtering;
if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
vlan_ops->ena_rx_filtering = noop_vlan;
else
@ -96,7 +103,14 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
vlan_ops->ena_rx_filtering =
ice_vsi_ena_rx_vlan_filtering;
/* all Rx traffic should be in the domain of the
* assigned port VLAN, so prevent disabling Rx VLAN
* filtering
*/
vlan_ops->dis_rx_filtering = noop_vlan;
} else {
vlan_ops->dis_rx_filtering =
ice_vsi_dis_rx_vlan_filtering;
if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
vlan_ops->ena_rx_filtering = noop_vlan;
else


@ -2942,7 +2942,9 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
if (tx_buffer->next_to_watch &&
time_after(jiffies, tx_buffer->time_stamp +
(adapter->tx_timeout_factor * HZ)) &&
!(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) {
!(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) &&
(rd32(IGC_TDH(tx_ring->reg_idx)) !=
readl(tx_ring->tail))) {
/* detected Tx unit hang */
netdev_err(tx_ring->netdev,
"Detected Tx Unit Hang\n"
@ -5068,6 +5070,24 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
/**
* igc_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
* @txqueue: queue number that timed out
**/
static void igc_tx_timeout(struct net_device *netdev,
unsigned int __always_unused txqueue)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
/* Do the reset outside of interrupt context */
adapter->tx_timeout_count++;
schedule_work(&adapter->reset_task);
wr32(IGC_EICS,
(adapter->eims_enable_mask & ~adapter->eims_other));
}
/**
* igc_get_stats64 - Get System Network Statistics
* @netdev: network interface device structure
@ -5495,7 +5515,7 @@ static void igc_watchdog_task(struct work_struct *work)
case SPEED_100:
case SPEED_1000:
case SPEED_2500:
adapter->tx_timeout_factor = 7;
adapter->tx_timeout_factor = 1;
break;
}
@ -6320,6 +6340,7 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_set_rx_mode = igc_set_rx_mode,
.ndo_set_mac_address = igc_set_mac,
.ndo_change_mtu = igc_change_mtu,
.ndo_tx_timeout = igc_tx_timeout,
.ndo_get_stats64 = igc_get_stats64,
.ndo_fix_features = igc_fix_features,
.ndo_set_features = igc_set_features,
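The first hunk tightens igc's Tx-hang heuristic: on top of the stale timestamp and the absence of TXOFF flow control, the hardware head pointer (TDH) must differ from the software tail before a hang is declared, so a fully drained ring is never reported as hung. The later hunks add a .ndo_tx_timeout handler that schedules the reset work outside interrupt context and kicks EICS, and drop the timeout factor for 100M+ link speeds from 7 to 1. A small model of the extended predicate (register reads become plain fields here):

	/* Sketch: the extended Tx-hang predicate from the hunk above. */
	#include <stdio.h>
	#include <stdbool.h>

	struct tx_state {
		bool next_to_watch;		/* descriptor outstanding */
		unsigned long time_stamp;	/* when it was queued */
		bool txoff;			/* pause frames asserted */
		unsigned int tdh;		/* hw head pointer */
		unsigned int tail;		/* sw tail pointer */
	};

	static bool tx_hang(const struct tx_state *s, unsigned long now,
			    unsigned long timeout)
	{
		return s->next_to_watch &&
		       now > s->time_stamp + timeout &&
		       !s->txoff &&
		       s->tdh != s->tail;	/* new: hw stopped mid-ring */
	}

	int main(void)
	{
		struct tx_state s = { true, 0, false, 3, 3 };

		/* head == tail: ring drained, old timestamp is not a hang */
		printf("%d\n", tx_hang(&s, 1000, 10));	/* 0 */
		s.tdh = 1;
		printf("%d\n", tx_hang(&s, 1000, 10));	/* 1 */
		return 0;
	}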

View File

@ -1570,8 +1570,8 @@ static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
if (IS_ERR(pp))
return pp;
err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, eth->rx_napi.napi_id,
id, PAGE_SIZE);
err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id,
eth->rx_napi.napi_id, PAGE_SIZE);
if (err < 0)
goto err_free_pp;
@ -1870,7 +1870,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
while (done < budget) {
unsigned int pktlen, *rxdcsum;
bool has_hwaccel_tag = false;
struct net_device *netdev;
u16 vlan_proto, vlan_tci;
dma_addr_t dma_addr;
u32 hash, reason;
int mac = 0;
@ -2010,27 +2012,29 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
if (trxd.rxd3 & RX_DMA_VTAG_V2)
__vlan_hwaccel_put_tag(skb,
htons(RX_DMA_VPID(trxd.rxd4)),
RX_DMA_VID(trxd.rxd4));
if (trxd.rxd3 & RX_DMA_VTAG_V2) {
vlan_proto = RX_DMA_VPID(trxd.rxd4);
vlan_tci = RX_DMA_VID(trxd.rxd4);
has_hwaccel_tag = true;
}
} else if (trxd.rxd2 & RX_DMA_VTAG) {
__vlan_hwaccel_put_tag(skb, htons(RX_DMA_VPID(trxd.rxd3)),
RX_DMA_VID(trxd.rxd3));
vlan_proto = RX_DMA_VPID(trxd.rxd3);
vlan_tci = RX_DMA_VID(trxd.rxd3);
has_hwaccel_tag = true;
}
}
/* When using VLAN untagging in combination with DSA, the
* hardware treats the MTK special tag as a VLAN and untags it.
*/
if (skb_vlan_tag_present(skb) && netdev_uses_dsa(netdev)) {
unsigned int port = ntohs(skb->vlan_proto) & GENMASK(2, 0);
if (has_hwaccel_tag && netdev_uses_dsa(netdev)) {
unsigned int port = vlan_proto & GENMASK(2, 0);
if (port < ARRAY_SIZE(eth->dsa_meta) &&
eth->dsa_meta[port])
skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
__vlan_hwaccel_clear_tag(skb);
} else if (has_hwaccel_tag) {
__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan_tci);
}
skb_record_rx_queue(skb, 0);
@ -3111,7 +3115,7 @@ static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
val |= config;
if (!i && eth->netdev[0] && netdev_uses_dsa(eth->netdev[0]))
if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i]))
val |= MTK_GDMA_SPECIAL_TAG;
mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
@ -3177,8 +3181,7 @@ static int mtk_open(struct net_device *dev)
struct mtk_eth *eth = mac->hw;
int i, err;
if ((mtk_uses_dsa(dev) && !eth->prog) &&
!(mac->id == 1 && MTK_HAS_CAPS(eth->soc->caps, MTK_GMAC1_TRGMII))) {
if (mtk_uses_dsa(dev) && !eth->prog) {
for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
struct metadata_dst *md_dst = eth->dsa_meta[i];
@ -3195,8 +3198,7 @@ static int mtk_open(struct net_device *dev)
}
} else {
/* Hardware special tag parsing needs to be disabled if at least
* one MAC does not use DSA, or the second MAC of the MT7621 and
* MT7623 SoCs is being used.
* one MAC does not use DSA.
*/
u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
val &= ~MTK_CDMP_STAG_EN;
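The mtk_eth rework records the hardware-provided VLAN protocol and TCI in locals instead of tagging the skb immediately: if the netdev uses DSA, the pseudo VLAN header is interpreted as the MTK special tag (its low three bits select the port metadata dst) and consumed; otherwise the tag is applied as usual. It also keys special-tag handling on each MAC's own netdev rather than netdev[0] and drops the MT7621/MT7623 GMAC1 special case. A simplified model of the defer-then-decide flow (all structures here are stand-ins for the driver's):

	/* Sketch: defer-then-decide VLAN tag handling, as in the hunks above. */
	#include <stdio.h>
	#include <stdbool.h>

	#define DSA_PORT_MASK 0x7	/* GENMASK(2, 0) */

	struct rx_meta { bool uses_dsa; };

	static void deliver(bool tagged, unsigned int proto, unsigned int tci)
	{
		if (tagged)
			printf("skb tagged: proto=%#x tci=%u\n", proto, tci);
		else
			printf("skb untagged\n");
	}

	static void rx_one(const struct rx_meta *m, bool has_hwaccel_tag,
			   unsigned int vlan_proto, unsigned int vlan_tci)
	{
		if (has_hwaccel_tag && m->uses_dsa) {
			unsigned int port = vlan_proto & DSA_PORT_MASK;

			printf("DSA special tag -> port %u, tag consumed\n", port);
			deliver(false, 0, 0);	/* tag never reaches the stack */
		} else {
			deliver(has_hwaccel_tag, vlan_proto, vlan_tci);
		}
	}

	int main(void)
	{
		struct rx_meta dsa = { true }, plain = { false };

		rx_one(&dsa,   true, 0x0003, 0);	/* routed to port 3 */
		rx_one(&plain, true, 0x8100, 42);	/* ordinary VLAN tag */
		return 0;
	}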

View File

@ -245,8 +245,9 @@ void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev)
pages = dev->priv.dbg.pages_debugfs;
debugfs_create_u32("fw_pages_total", 0400, pages, &dev->priv.fw_pages);
debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.vfs_pages);
debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.host_pf_pages);
debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.page_counters[MLX5_VF]);
debugfs_create_u32("fw_pages_sfs", 0400, pages, &dev->priv.page_counters[MLX5_SF]);
debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.page_counters[MLX5_HOST_PF]);
debugfs_create_u32("fw_pages_alloc_failed", 0400, pages, &dev->priv.fw_pages_alloc_failed);
debugfs_create_u32("fw_pages_give_dropped", 0400, pages, &dev->priv.give_pages_dropped);
debugfs_create_u32("fw_pages_reclaim_discard", 0400, pages,

View File

@ -64,6 +64,7 @@ static int mlx5_query_mtrc_caps(struct mlx5_fw_tracer *tracer)
MLX5_GET(mtrc_cap, out, num_string_trace);
tracer->str_db.num_string_db = MLX5_GET(mtrc_cap, out, num_string_db);
tracer->owner = !!MLX5_GET(mtrc_cap, out, trace_owner);
tracer->str_db.loaded = false;
for (i = 0; i < tracer->str_db.num_string_db; i++) {
mtrc_cap_sp = MLX5_ADDR_OF(mtrc_cap, out, string_db_param[i]);
@ -756,6 +757,7 @@ static int mlx5_fw_tracer_set_mtrc_conf(struct mlx5_fw_tracer *tracer)
if (err)
mlx5_core_warn(dev, "FWTracer: Failed to set tracer configurations %d\n", err);
tracer->buff.consumer_index = 0;
return err;
}
@ -820,7 +822,6 @@ static void mlx5_fw_tracer_ownership_change(struct work_struct *work)
mlx5_core_dbg(tracer->dev, "FWTracer: ownership changed, current=(%d)\n", tracer->owner);
if (tracer->owner) {
tracer->owner = false;
tracer->buff.consumer_index = 0;
return;
}

View File

@ -87,7 +87,7 @@ void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
mlx5_host_pf_cleanup(dev);
err = mlx5_wait_for_pages(dev, &dev->priv.host_pf_pages);
err = mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_HOST_PF]);
if (err)
mlx5_core_warn(dev, "Timeout reclaiming external host PF pages err(%d)\n", err);
}

View File

@ -438,10 +438,6 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
switch (event) {
case SWITCHDEV_FDB_ADD_TO_BRIDGE:
/* only handle the event on native eswitch of representor */
if (!mlx5_esw_bridge_is_local(dev, rep, esw))
break;
fdb_info = container_of(info,
struct switchdev_notifier_fdb_info,
info);

View File

@ -443,7 +443,7 @@ void mlx5e_enable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
void mlx5e_disable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
{
if (fs->vlan->cvlan_filter_disabled)
if (!fs->vlan || fs->vlan->cvlan_filter_disabled)
return;
fs->vlan->cvlan_filter_disabled = true;

View File

@ -591,7 +591,8 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
rq->ix = c->ix;
rq->channel = c;
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->hw_mtu =
MLX5E_SW2HW_MTU(params, params->sw_mtu) - ETH_FCS_LEN * !params->scatter_fcs_en;
rq->xdpsq = &c->rq_xdpsq;
rq->stats = &c->priv->channel_stats[c->ix]->rq;
rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
@ -1014,35 +1015,6 @@ int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state)
return mlx5e_rq_to_ready(rq, curr_state);
}
static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
{
struct mlx5_core_dev *mdev = rq->mdev;
void *in;
void *rqc;
int inlen;
int err;
inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
MLX5_SET64(modify_rq_in, in, modify_bitmask,
MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
MLX5_SET(rqc, rqc, scatter_fcs, enable);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
err = mlx5_core_modify_rq(mdev, rq->rqn, in);
kvfree(in);
return err;
}
static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
struct mlx5_core_dev *mdev = rq->mdev;
@ -3314,20 +3286,6 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
mlx5e_destroy_tises(priv);
}
static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
{
int err = 0;
int i;
for (i = 0; i < chs->num; i++) {
err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
if (err)
return err;
}
return 0;
}
static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
{
int err;
@ -3903,41 +3861,27 @@ static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable)
return mlx5_set_ports_check(mdev, in, sizeof(in));
}
static int mlx5e_set_rx_port_ts_wrap(struct mlx5e_priv *priv, void *ctx)
{
struct mlx5_core_dev *mdev = priv->mdev;
bool enable = *(bool *)ctx;
return mlx5e_set_rx_port_ts(mdev, enable);
}
static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_channels *chs = &priv->channels;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_params new_params;
int err;
mutex_lock(&priv->state_lock);
if (enable) {
err = mlx5e_set_rx_port_ts(mdev, false);
if (err)
goto out;
chs->params.scatter_fcs_en = true;
err = mlx5e_modify_channels_scatter_fcs(chs, true);
if (err) {
chs->params.scatter_fcs_en = false;
mlx5e_set_rx_port_ts(mdev, true);
}
} else {
chs->params.scatter_fcs_en = false;
err = mlx5e_modify_channels_scatter_fcs(chs, false);
if (err) {
chs->params.scatter_fcs_en = true;
goto out;
}
err = mlx5e_set_rx_port_ts(mdev, true);
if (err) {
mlx5_core_warn(mdev, "Failed to set RX port timestamp %d\n", err);
err = 0;
}
}
out:
new_params = chs->params;
new_params.scatter_fcs_en = enable;
err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_set_rx_port_ts_wrap,
&new_params.scatter_fcs_en, true);
mutex_unlock(&priv->state_lock);
return err;
}
@ -4074,6 +4018,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
if (netdev->features & NETIF_F_GRO_HW)
netdev_warn(netdev, "Disabling HW_GRO, not supported in switchdev mode\n");
features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
if (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
netdev_warn(netdev, "Disabling HW_VLAN CTAG FILTERING, not supported in switchdev mode\n");
return features;
}
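Two things happen in this file: the RQ's hardware MTU now deducts the 4-byte FCS when scatter-FCS is off, since the hardware strips the FCS from the buffer in that case, and the open-coded enable/disable-with-rollback logic in set_feature_rx_fcs() collapses into one mlx5e_safe_switch_params() call with a small port-timestamp callback. A sketch of the MTU arithmetic (MLX5E_SW2HW_MTU is simplified to a plain header add here):

	/* Sketch: hw MTU accounting for scattered FCS, per the first hunk. */
	#include <stdio.h>
	#include <stdbool.h>

	#define ETH_HLEN    14
	#define ETH_FCS_LEN  4

	static unsigned int sw2hw_mtu(unsigned int sw_mtu)
	{
		return sw_mtu + ETH_HLEN;	/* simplified */
	}

	static unsigned int rq_hw_mtu(unsigned int sw_mtu, bool scatter_fcs_en)
	{
		/* when the FCS is not scattered into the buffer, the
		 * hardware strips it, so the buffer needs 4 bytes less
		 */
		return sw2hw_mtu(sw_mtu) - ETH_FCS_LEN * !scatter_fcs_en;
	}

	int main(void)
	{
		printf("%u\n", rq_hw_mtu(1500, true));	/* 1514 */
		printf("%u\n", rq_hw_mtu(1500, false));	/* 1510 */
		return 0;
	}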

View File

@ -1715,7 +1715,7 @@ void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16
struct mlx5_esw_bridge *bridge;
port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
if (!port || port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER)
if (!port)
return;
bridge = port->bridge;

View File

@ -189,16 +189,16 @@ static inline int mlx5_ptys_rate_enum_to_int(enum mlx5_ptys_rate rate)
}
}
static int mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper)
static u32 mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper)
{
int rate, width;
rate = mlx5_ptys_rate_enum_to_int(ib_proto_oper);
if (rate < 0)
return -EINVAL;
return SPEED_UNKNOWN;
width = mlx5_ptys_width_enum_to_int(ib_link_width_oper);
if (width < 0)
return -EINVAL;
return SPEED_UNKNOWN;
return rate * width;
}
@ -221,16 +221,13 @@ static int mlx5i_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
speed = mlx5i_get_speed_settings(ib_link_width_oper, ib_proto_oper);
if (speed < 0)
return -EINVAL;
link_ksettings->base.speed = speed;
link_ksettings->base.duplex = speed == SPEED_UNKNOWN ? DUPLEX_UNKNOWN : DUPLEX_FULL;
link_ksettings->base.duplex = DUPLEX_FULL;
link_ksettings->base.port = PORT_OTHER;
link_ksettings->base.autoneg = AUTONEG_DISABLE;
link_ksettings->base.speed = speed;
return 0;
}
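Rather than failing get_link_ksettings() with -EINVAL when the IB rate or width is unrecognized, the helper now returns SPEED_UNKNOWN and the caller reports an unknown speed and duplex, matching ethtool convention. A tiny sketch of that convention (in the uapi headers SPEED_UNKNOWN is -1 and DUPLEX_UNKNOWN is 0xff; the rest is illustrative):

	/* Sketch: report unknown speed instead of erroring out, as above. */
	#include <stdio.h>

	#define SPEED_UNKNOWN  ((unsigned int)-1)
	#define DUPLEX_FULL     1
	#define DUPLEX_UNKNOWN  0xff

	static unsigned int get_speed(int rate, int width)
	{
		if (rate < 0 || width < 0)
			return SPEED_UNKNOWN;	/* report, don't fail the ioctl */
		return (unsigned int)(rate * width);
	}

	int main(void)
	{
		unsigned int speed = get_speed(-1, 4);
		unsigned int duplex = speed == SPEED_UNKNOWN ? DUPLEX_UNKNOWN
							     : DUPLEX_FULL;

		printf("speed=%d duplex=%#x\n", (int)speed, duplex);
		return 0;
	}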

View File

@ -2110,7 +2110,7 @@ static int __init mlx5_init(void)
mlx5_core_verify_params();
mlx5_register_debugfs();
err = pci_register_driver(&mlx5_core_driver);
err = mlx5e_init();
if (err)
goto err_debug;
@ -2118,16 +2118,16 @@ static int __init mlx5_init(void)
if (err)
goto err_sf;
err = mlx5e_init();
err = pci_register_driver(&mlx5_core_driver);
if (err)
goto err_en;
goto err_pci;
return 0;
err_en:
err_pci:
mlx5_sf_driver_unregister();
err_sf:
pci_unregister_driver(&mlx5_core_driver);
mlx5e_cleanup();
err_debug:
mlx5_unregister_debugfs();
return err;
@ -2135,9 +2135,9 @@ err_debug:
static void __exit mlx5_cleanup(void)
{
mlx5e_cleanup();
mlx5_sf_driver_unregister();
pci_unregister_driver(&mlx5_core_driver);
mlx5_sf_driver_unregister();
mlx5e_cleanup();
mlx5_unregister_debugfs();
}
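The module init is reordered so pci_register_driver() runs last, once mlx5e and the SF driver are initialized; a probe can therefore never observe a half-constructed stack, and the exit path unregisters the PCI driver first before tearing the rest down in reverse. A generic sketch of that register-last, unwind-in-reverse pattern (the subsystem names are illustrative):

	/* Sketch: expose the driver entry point last; unwind in reverse. */
	#include <stdio.h>

	static int  init_a(void) { puts("a up");   return 0; }	/* e.g. mlx5e_init */
	static void fini_a(void) { puts("a down"); }
	static int  init_b(void) { puts("b up");   return 0; }	/* e.g. SF driver */
	static void fini_b(void) { puts("b down"); }
	static int  register_driver(void)   { puts("driver visible"); return 0; }
	static void unregister_driver(void) { puts("driver hidden"); }

	static int module_init_(void)
	{
		int err;

		err = init_a();
		if (err)
			return err;
		err = init_b();
		if (err)
			goto err_a;
		/* only now may probe() run: everything it needs exists */
		err = register_driver();
		if (err)
			goto err_b;
		return 0;

	err_b:
		fini_b();
	err_a:
		fini_a();
		return err;
	}

	static void module_exit_(void)
	{
		unregister_driver();	/* stop new probes first */
		fini_b();
		fini_a();
	}

	int main(void)
	{
		if (!module_init_())
			module_exit_();
		return 0;
	}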

View File

@ -74,6 +74,14 @@ static u32 get_function(u16 func_id, bool ec_function)
return (u32)func_id | (ec_function << 16);
}
static u16 func_id_to_type(struct mlx5_core_dev *dev, u16 func_id, bool ec_function)
{
if (!func_id)
return mlx5_core_is_ecpf(dev) && !ec_function ? MLX5_HOST_PF : MLX5_PF;
return func_id <= mlx5_core_max_vfs(dev) ? MLX5_VF : MLX5_SF;
}
static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
{
struct rb_root *root;
@ -332,6 +340,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
int notify_fail = event;
u16 func_type;
u64 addr;
int err;
u32 *in;
@ -383,11 +392,9 @@ retry:
goto out_dropped;
}
func_type = func_id_to_type(dev, func_id, ec_function);
dev->priv.page_counters[func_type] += npages;
dev->priv.fw_pages += npages;
if (func_id)
dev->priv.vfs_pages += npages;
else if (mlx5_core_is_ecpf(dev) && !ec_function)
dev->priv.host_pf_pages += npages;
mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
npages, ec_function, func_id, err);
@ -414,6 +421,7 @@ static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
struct rb_root *root;
struct rb_node *p;
int npages = 0;
u16 func_type;
root = xa_load(&dev->priv.page_root_xa, function);
if (WARN_ON_ONCE(!root))
@ -428,11 +436,9 @@ static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
free_fwp(dev, fwp, fwp->free_count);
}
func_type = func_id_to_type(dev, func_id, ec_function);
dev->priv.page_counters[func_type] -= npages;
dev->priv.fw_pages -= npages;
if (func_id)
dev->priv.vfs_pages -= npages;
else if (mlx5_core_is_ecpf(dev) && !ec_function)
dev->priv.host_pf_pages -= npages;
mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
npages, ec_function, func_id);
@ -498,6 +504,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
int num_claimed;
u16 func_type;
u32 *out;
int err;
int i;
@ -549,11 +556,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
if (nclaimed)
*nclaimed = num_claimed;
func_type = func_id_to_type(dev, func_id, ec_function);
dev->priv.page_counters[func_type] -= num_claimed;
dev->priv.fw_pages -= num_claimed;
if (func_id)
dev->priv.vfs_pages -= num_claimed;
else if (mlx5_core_is_ecpf(dev) && !ec_function)
dev->priv.host_pf_pages -= num_claimed;
out_free:
kvfree(out);
@ -706,12 +711,12 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
WARN(dev->priv.fw_pages,
"FW pages counter is %d after reclaiming all pages\n",
dev->priv.fw_pages);
WARN(dev->priv.vfs_pages,
WARN(dev->priv.page_counters[MLX5_VF],
"VFs FW pages counter is %d after reclaiming all pages\n",
dev->priv.vfs_pages);
WARN(dev->priv.host_pf_pages,
dev->priv.page_counters[MLX5_VF]);
WARN(dev->priv.page_counters[MLX5_HOST_PF],
"External host PF FW pages counter is %d after reclaiming all pages\n",
dev->priv.host_pf_pages);
dev->priv.page_counters[MLX5_HOST_PF]);
return 0;
}
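The refactor replaces the ad-hoc vfs_pages and host_pf_pages counters with a page_counters[] array indexed by function type, and func_id_to_type() centralizes the classification: func_id 0 is the PF (or the external host PF when running on an ECPF and the page is not for an EC function), ids up to the VF limit are VFs, and everything above is an SF. A direct userspace model of that classification (the thresholds are made up):

	/* Sketch: per-type page counters keyed by func_id_to_type(), as above. */
	#include <stdio.h>
	#include <stdbool.h>

	enum func_type { PF, VF, SF, HOST_PF, NUM_TYPES };

	static const char *names[NUM_TYPES] = { "PF", "VF", "SF", "HOST_PF" };
	static long page_counters[NUM_TYPES];

	static int max_vfs = 8;		/* stand-in for mlx5_core_max_vfs() */
	static bool is_ecpf = true;	/* stand-in for mlx5_core_is_ecpf() */

	static enum func_type func_id_to_type(unsigned int func_id, bool ec_function)
	{
		if (!func_id)
			return is_ecpf && !ec_function ? HOST_PF : PF;
		return func_id <= (unsigned int)max_vfs ? VF : SF;
	}

	int main(void)
	{
		page_counters[func_id_to_type(0, false)] += 64;	/* host PF */
		page_counters[func_id_to_type(3, true)]  += 32;	/* a VF */
		page_counters[func_id_to_type(20, true)] += 16;	/* an SF */

		for (int i = 0; i < NUM_TYPES; i++)
			printf("%-8s %ld\n", names[i], page_counters[i]);
		return 0;
	}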

View File

@ -147,7 +147,7 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
mlx5_eswitch_disable_sriov(dev->priv.eswitch, clear_vf);
if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
if (mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_VF]))
mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
}

View File

@ -1138,12 +1138,14 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
rule->flow_source))
return 0;
mlx5dr_domain_nic_lock(nic_dmn);
ret = mlx5dr_matcher_select_builders(matcher,
nic_matcher,
dr_rule_get_ipv(&param->outer),
dr_rule_get_ipv(&param->inner));
if (ret)
return ret;
goto err_unlock;
hw_ste_arr_is_opt = nic_matcher->num_of_builders <= DR_RULE_MAX_STES_OPTIMIZED;
if (likely(hw_ste_arr_is_opt)) {
@ -1152,12 +1154,12 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
hw_ste_arr = kzalloc((nic_matcher->num_of_builders + DR_ACTION_MAX_STES) *
DR_STE_SIZE, GFP_KERNEL);
if (!hw_ste_arr)
return -ENOMEM;
if (!hw_ste_arr) {
ret = -ENOMEM;
goto err_unlock;
}
}
mlx5dr_domain_nic_lock(nic_dmn);
ret = mlx5dr_matcher_add_to_tbl_nic(dmn, nic_matcher);
if (ret)
goto free_hw_ste;
@ -1223,7 +1225,10 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
mlx5dr_domain_nic_unlock(nic_dmn);
goto out;
if (unlikely(!hw_ste_arr_is_opt))
kfree(hw_ste_arr);
return 0;
free_rule:
dr_rule_clean_rule_members(rule, nic_rule);
@ -1238,12 +1243,12 @@ remove_from_nic_tbl:
mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher);
free_hw_ste:
mlx5dr_domain_nic_unlock(nic_dmn);
out:
if (unlikely(!hw_ste_arr_is_opt))
if (!hw_ste_arr_is_opt)
kfree(hw_ste_arr);
err_unlock:
mlx5dr_domain_nic_unlock(nic_dmn);
return ret;
}
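The dr_rule fix takes the nic-domain lock before builder selection and converts the early returns into goto err_unlock, so every failure path drops the lock exactly once; on success the non-optimized STE array is freed before returning. A sketch of that single-unlock unwind discipline (lock/unlock stand in for mlx5dr_domain_nic_lock/unlock):

	/* Sketch: lock up front, funnel all failures through one unlock. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <errno.h>

	static void lock(void)   { puts("lock"); }
	static void unlock(void) { puts("unlock"); }

	static int select_builders(int fail) { return fail ? -1 : 0; }

	static int create_rule(int fail_select)
	{
		char *hw_ste;
		int ret;

		lock();

		ret = select_builders(fail_select);
		if (ret)
			goto err_unlock;	/* was a bare return */

		hw_ste = malloc(64);
		if (!hw_ste) {
			ret = -ENOMEM;
			goto err_unlock;
		}
		memset(hw_ste, 0, 64);

		/* ... build the rule under the lock ... */

		unlock();
		free(hw_ste);
		return 0;

	err_unlock:
		unlock();
		return ret;
	}

	int main(void)
	{
		printf("-> %d\n", create_rule(1));	/* unlocks on failure */
		printf("-> %d\n", create_rule(0));
		return 0;
	}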

View File

@ -633,7 +633,7 @@ int sparx5_ptp_init(struct sparx5 *sparx5)
/* Enable master counters */
spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0x7), sparx5, PTP_PTP_DOM_CFG);
for (i = 0; i < sparx5->port_count; i++) {
for (i = 0; i < SPX5_PORTS; i++) {
port = sparx5->ports[i];
if (!port)
continue;
@ -649,7 +649,7 @@ void sparx5_ptp_deinit(struct sparx5 *sparx5)
struct sparx5_port *port;
int i;
for (i = 0; i < sparx5->port_count; i++) {
for (i = 0; i < SPX5_PORTS; i++) {
port = sparx5->ports[i];
if (!port)
continue;
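Looping to SPX5_PORTS instead of sparx5->port_count matters when the port map is sparse: the ports[] array is indexed by physical port number, so configured ports can sit at indices at or beyond port_count and would be skipped by the old bound; the existing NULL check already handles the holes. A tiny illustration (array size and layout are made up):

	/* Sketch: walk the full, possibly sparse port array, as above. */
	#include <stdio.h>

	#define SPX5_PORTS 8

	struct port { int id; };

	int main(void)
	{
		/* 3 configured ports, but none at indices 0..2 */
		struct port p5 = { 5 }, p6 = { 6 }, p7 = { 7 };
		struct port *ports[SPX5_PORTS] = { [5] = &p5, [6] = &p6, [7] = &p7 };
		int port_count = 3;

		(void)port_count;	/* bounding by this would miss 5..7 */

		for (int i = 0; i < SPX5_PORTS; i++) {
			if (!ports[i])
				continue;
			printf("init PTP on port %d\n", ports[i]->id);
		}
		return 0;
	}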

View File

@ -1217,9 +1217,7 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
unsigned int max_queues_per_port = num_online_cpus();
struct gdma_context *gc = pci_get_drvdata(pdev);
struct gdma_irq_context *gic;
unsigned int max_irqs;
u16 *cpus;
cpumask_var_t req_mask;
unsigned int max_irqs, cpu;
int nvec, irq;
int err, i = 0, j;
@ -1240,21 +1238,7 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
goto free_irq_vector;
}
if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL)) {
err = -ENOMEM;
goto free_irq;
}
cpus = kcalloc(nvec, sizeof(*cpus), GFP_KERNEL);
if (!cpus) {
err = -ENOMEM;
goto free_mask;
}
for (i = 0; i < nvec; i++)
cpus[i] = cpumask_local_spread(i, gc->numa_node);
for (i = 0; i < nvec; i++) {
cpumask_set_cpu(cpus[i], req_mask);
gic = &gc->irq_contexts[i];
gic->handler = NULL;
gic->arg = NULL;
@ -1269,17 +1253,16 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
irq = pci_irq_vector(pdev, i);
if (irq < 0) {
err = irq;
goto free_mask;
goto free_irq;
}
err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
if (err)
goto free_mask;
irq_set_affinity_and_hint(irq, req_mask);
cpumask_clear(req_mask);
goto free_irq;
cpu = cpumask_local_spread(i, gc->numa_node);
irq_set_affinity_and_hint(irq, cpumask_of(cpu));
}
free_cpumask_var(req_mask);
kfree(cpus);
err = mana_gd_alloc_res_map(nvec, &gc->msix_resource);
if (err)
@ -1290,13 +1273,12 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
return 0;
free_mask:
free_cpumask_var(req_mask);
kfree(cpus);
free_irq:
for (j = i - 1; j >= 0; j--) {
irq = pci_irq_vector(pdev, j);
gic = &gc->irq_contexts[j];
irq_update_affinity_hint(irq, NULL);
free_irq(irq, gic);
}
@ -1324,6 +1306,9 @@ static void mana_gd_remove_irqs(struct pci_dev *pdev)
continue;
gic = &gc->irq_contexts[i];
/* Need to clear the hint before free_irq */
irq_update_affinity_hint(irq, NULL);
free_irq(irq, gic);
}
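The mana rework drops the temporary req_mask cpumask and cpus[] array: each vector simply gets cpumask_local_spread(i, numa_node) and the affinity hint is set from the static cpumask_of() for that CPU, while teardown now clears the hint before free_irq(). A toy model of NUMA-local spreading (the topology and the spread routine here are illustrative, not the kernel's implementation):

	/* Sketch: spread vectors over NUMA-local CPUs first, remote after. */
	#include <stdio.h>

	#define NCPUS 8

	/* toy topology: CPUs 0-3 on node 0, CPUs 4-7 on node 1 */
	static int cpu_node(int cpu) { return cpu / 4; }

	static int local_spread(int i, int node)
	{
		int seen = 0;

		/* i-th CPU on the preferred node, wrapping to remote CPUs */
		for (int pass = 0; pass < 2; pass++)
			for (int cpu = 0; cpu < NCPUS; cpu++) {
				int local = cpu_node(cpu) == node;

				if ((pass == 0) == local && seen++ == i)
					return cpu;
			}
		return 0;
	}

	int main(void)
	{
		int nvec = 6, node = 1;

		for (int i = 0; i < nvec; i++)
			printf("vector %d -> cpu %d\n", i, local_spread(i, node));
		return 0;
	}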

View File

@ -605,6 +605,18 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
flow_rule_match_control(rule, &match);
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
flow_rule_match_vlan(rule, &match);
filter->key_type = OCELOT_VCAP_KEY_ANY;
filter->vlan.vid.value = match.key->vlan_id;
filter->vlan.vid.mask = match.mask->vlan_id;
filter->vlan.pcp.value[0] = match.key->vlan_priority;
filter->vlan.pcp.mask[0] = match.mask->vlan_priority;
match_protocol = false;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
@ -737,18 +749,6 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
match_protocol = false;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
flow_rule_match_vlan(rule, &match);
filter->key_type = OCELOT_VCAP_KEY_ANY;
filter->vlan.vid.value = match.key->vlan_id;
filter->vlan.vid.mask = match.mask->vlan_id;
filter->vlan.pcp.value[0] = match.key->vlan_priority;
filter->vlan.pcp.mask[0] = match.mask->vlan_priority;
match_protocol = false;
}
finished_key_parsing:
if (match_protocol && proto != ETH_P_ALL) {
if (filter->block_id == VCAP_ES0) {
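Moving the VLAN match block ahead of the ETH_ADDRS block appears to matter because the MAC-address handler can jump straight to finished_key_parsing, which previously skipped the VLAN keys for filters that match on both. A toy model of why key order interacts with an early exit (everything here is a stand-in for the flower key walk):

	/* Sketch: a handler with an early exit must run after every key
	 * that still needs recording — hence VLAN before ETH_ADDRS.
	 */
	#include <stdio.h>
	#include <stdbool.h>

	struct filter { int vid; bool has_vid; bool mac_shortcut; };

	static void parse(struct filter *f, bool match_vlan, bool match_eth,
			  bool vlan_first)
	{
		*f = (struct filter){ 0 };

		for (int step = 0; step < 2; step++) {
			bool do_vlan = (step == 0) == vlan_first;

			if (do_vlan && match_vlan) {
				f->vid = 100;
				f->has_vid = true;
			} else if (!do_vlan && match_eth) {
				f->mac_shortcut = true;
				break;	/* the goto finished_key_parsing */
			}
		}
	}

	int main(void)
	{
		struct filter f;

		parse(&f, true, true, false);	/* old order: VLAN key lost */
		printf("eth-first:  has_vid=%d\n", f.has_vid);

		parse(&f, true, true, true);	/* new order: key recorded */
		printf("vlan-first: has_vid=%d\n", f.has_vid);
		return 0;
	}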

Some files were not shown because too many files have changed in this diff.