Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
A function in kernel/bpf/syscall.c which got a bug fix in 'net' was moved to kernel/bpf/verifier.c in 'net-next'.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 7b9f6da175

Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -371,6 +371,8 @@
 
     phy1: ethernet-phy@1 {
         reg = <7>;
+        eee-broken-100tx;
+        eee-broken-1000t;
     };
 };
 
@@ -672,6 +672,7 @@
     ti,non-removable;
     bus-width = <4>;
     cap-power-off-card;
+    keep-power-in-suspend;
     pinctrl-names = "default";
     pinctrl-0 = <&mmc2_pins>;
 
@@ -283,6 +283,7 @@
     device_type = "pci";
     ranges = <0x81000000 0 0          0x03000 0 0x00010000
               0x82000000 0 0x20013000 0x13000 0 0xffed000>;
+    bus-range = <0x00 0xff>;
     #interrupt-cells = <1>;
     num-lanes = <1>;
     linux,pci-domain = <0>;
@@ -319,6 +320,7 @@
     device_type = "pci";
     ranges = <0x81000000 0 0          0x03000 0 0x00010000
               0x82000000 0 0x30013000 0x13000 0 0xffed000>;
+    bus-range = <0x00 0xff>;
     #interrupt-cells = <1>;
     num-lanes = <1>;
     linux,pci-domain = <1>;
@@ -121,7 +121,7 @@
 &i2c3 {
     clock-frequency = <400000>;
     at24@50 {
-        compatible = "at24,24c02";
+        compatible = "atmel,24c64";
         readonly;
         reg = <0x50>;
     };
@@ -66,12 +66,6 @@
         opp-microvolt = <1200000>;
         clock-latency-ns = <244144>; /* 8 32k periods */
     };
 
-    opp@1200000000 {
-        opp-hz = /bits/ 64 <1200000000>;
-        opp-microvolt = <1320000>;
-        clock-latency-ns = <244144>; /* 8 32k periods */
-    };
 };
 
 cpus {
@@ -81,16 +75,22 @@
         operating-points-v2 = <&cpu0_opp_table>;
     };
 
+    cpu@1 {
+        operating-points-v2 = <&cpu0_opp_table>;
+    };
+
     cpu@2 {
         compatible = "arm,cortex-a7";
         device_type = "cpu";
         reg = <2>;
+        operating-points-v2 = <&cpu0_opp_table>;
     };
 
     cpu@3 {
         compatible = "arm,cortex-a7";
         device_type = "cpu";
         reg = <3>;
+        operating-points-v2 = <&cpu0_opp_table>;
     };
 };
 
@@ -270,6 +270,7 @@ extern const struct smp_operations omap4_smp_ops;
 extern int omap4_mpuss_init(void);
 extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state);
 extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state);
+extern u32 omap4_get_cpu1_ns_pa_addr(void);
 #else
 static inline int omap4_enter_lowpower(unsigned int cpu,
                                        unsigned int power_state)
@@ -50,7 +50,7 @@ void omap4_cpu_die(unsigned int cpu)
     omap4_hotplug_cpu(cpu, PWRDM_POWER_OFF);
 
     if (omap_secure_apis_support())
-        boot_cpu = omap_read_auxcoreboot0();
+        boot_cpu = omap_read_auxcoreboot0() >> 9;
     else
         boot_cpu =
             readl_relaxed(base + OMAP_AUX_CORE_BOOT_0) >> 5;
@@ -64,6 +64,7 @@
 #include "prm-regbits-44xx.h"
 
 static void __iomem *sar_base;
+static u32 old_cpu1_ns_pa_addr;
 
 #if defined(CONFIG_PM) && defined(CONFIG_SMP)
 
@@ -212,6 +213,11 @@ static void __init save_l2x0_context(void)
 {}
 #endif
 
+u32 omap4_get_cpu1_ns_pa_addr(void)
+{
+    return old_cpu1_ns_pa_addr;
+}
+
 /**
  * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function
  * The purpose of this function is to manage low power programming
@@ -460,22 +466,30 @@ int __init omap4_mpuss_init(void)
 void __init omap4_mpuss_early_init(void)
 {
     unsigned long startup_pa;
+    void __iomem *ns_pa_addr;
 
-    if (!(cpu_is_omap44xx() || soc_is_omap54xx()))
+    if (!(soc_is_omap44xx() || soc_is_omap54xx()))
         return;
 
     sar_base = omap4_get_sar_ram_base();
 
-    if (cpu_is_omap443x())
+    /* Save old NS_PA_ADDR for validity checks later on */
+    if (soc_is_omap44xx())
+        ns_pa_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
+    else
+        ns_pa_addr = sar_base + OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
+    old_cpu1_ns_pa_addr = readl_relaxed(ns_pa_addr);
+
+    if (soc_is_omap443x())
         startup_pa = __pa_symbol(omap4_secondary_startup);
-    else if (cpu_is_omap446x())
+    else if (soc_is_omap446x())
         startup_pa = __pa_symbol(omap4460_secondary_startup);
     else if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
         startup_pa = __pa_symbol(omap5_secondary_hyp_startup);
     else
         startup_pa = __pa_symbol(omap5_secondary_startup);
 
-    if (cpu_is_omap44xx())
+    if (soc_is_omap44xx())
         writel_relaxed(startup_pa, sar_base +
                        CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
     else
@@ -94,6 +94,5 @@ ENTRY(omap_read_auxcoreboot0)
     ldr  r12, =0x103
     dsb
     smc  #0
-    mov  r0, r0, lsr #9
     ldmfd sp!, {r2-r12, pc}
 ENDPROC(omap_read_auxcoreboot0)
@@ -21,6 +21,7 @@
 #include <linux/io.h>
 #include <linux/irqchip/arm-gic.h>
 
+#include <asm/sections.h>
 #include <asm/smp_scu.h>
 #include <asm/virt.h>
 
@@ -40,10 +41,14 @@
 
 #define OMAP5_CORE_COUNT		0x2
 
+#define AUX_CORE_BOOT0_GP_RELEASE	0x020
+#define AUX_CORE_BOOT0_HS_RELEASE	0x200
+
 struct omap_smp_config {
     unsigned long cpu1_rstctrl_pa;
     void __iomem *cpu1_rstctrl_va;
     void __iomem *scu_base;
+    void __iomem *wakeupgen_base;
     void *startup_addr;
 };
 
@@ -140,7 +145,6 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
     static struct clockdomain *cpu1_clkdm;
     static bool booted;
     static struct powerdomain *cpu1_pwrdm;
-    void __iomem *base = omap_get_wakeupgen_base();
 
     /*
      * Set synchronisation state between this boot processor
@@ -155,9 +159,11 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
      * A barrier is added to ensure that write buffer is drained
      */
     if (omap_secure_apis_support())
-        omap_modify_auxcoreboot0(0x200, 0xfffffdff);
+        omap_modify_auxcoreboot0(AUX_CORE_BOOT0_HS_RELEASE,
+                                 0xfffffdff);
     else
-        writel_relaxed(0x20, base + OMAP_AUX_CORE_BOOT_0);
+        writel_relaxed(AUX_CORE_BOOT0_GP_RELEASE,
+                       cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
 
     if (!cpu1_clkdm && !cpu1_pwrdm) {
         cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
@@ -261,9 +267,72 @@ static void __init omap4_smp_init_cpus(void)
         set_cpu_possible(i, true);
 }
 
+/*
+ * For now, just make sure the start-up address is not within the booting
+ * kernel space as that means we just overwrote whatever secondary_startup()
+ * code there was.
+ */
+static bool __init omap4_smp_cpu1_startup_valid(unsigned long addr)
+{
+    if ((addr >= __pa(PAGE_OFFSET)) && (addr <= __pa(__bss_start)))
+        return false;
+
+    return true;
+}
+
+/*
+ * We may need to reset CPU1 before configuring, otherwise kexec boot can end
+ * up trying to use old kernel startup address or suspend-resume will
+ * occasionally fail to bring up CPU1 on 4430 if CPU1 fails to enter deeper
+ * idle states.
+ */
+static void __init omap4_smp_maybe_reset_cpu1(struct omap_smp_config *c)
+{
+    unsigned long cpu1_startup_pa, cpu1_ns_pa_addr;
+    bool needs_reset = false;
+    u32 released;
+
+    if (omap_secure_apis_support())
+        released = omap_read_auxcoreboot0() & AUX_CORE_BOOT0_HS_RELEASE;
+    else
+        released = readl_relaxed(cfg.wakeupgen_base +
+                                 OMAP_AUX_CORE_BOOT_0) &
+            AUX_CORE_BOOT0_GP_RELEASE;
+    if (released) {
+        pr_warn("smp: CPU1 not parked?\n");
+
+        return;
+    }
+
+    cpu1_startup_pa = readl_relaxed(cfg.wakeupgen_base +
+                                    OMAP_AUX_CORE_BOOT_1);
+    cpu1_ns_pa_addr = omap4_get_cpu1_ns_pa_addr();
+
+    /* Did the configured secondary_startup() get overwritten? */
+    if (!omap4_smp_cpu1_startup_valid(cpu1_startup_pa))
+        needs_reset = true;
+
+    /*
+     * If omap4 or 5 has NS_PA_ADDR configured, CPU1 may be in a
+     * deeper idle state in WFI and will wake to an invalid address.
+     */
+    if ((soc_is_omap44xx() || soc_is_omap54xx()) &&
+        !omap4_smp_cpu1_startup_valid(cpu1_ns_pa_addr))
+        needs_reset = true;
+
+    if (!needs_reset || !c->cpu1_rstctrl_va)
+        return;
+
+    pr_info("smp: CPU1 parked within kernel, needs reset (0x%lx 0x%lx)\n",
+            cpu1_startup_pa, cpu1_ns_pa_addr);
+
+    writel_relaxed(1, c->cpu1_rstctrl_va);
+    readl_relaxed(c->cpu1_rstctrl_va);
+    writel_relaxed(0, c->cpu1_rstctrl_va);
+}
+
 static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
 {
-    void __iomem *base = omap_get_wakeupgen_base();
     const struct omap_smp_config *c = NULL;
 
     if (soc_is_omap443x())
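The reset dance at the end of omap4_smp_maybe_reset_cpu1() follows a common MMIO idiom: with the relaxed accessors nothing orders the two writes at the device, so the read-back in between forces the assert to post before the deassert. A minimal sketch of the pattern (generic kernel-style C, not tied to this SoC; the register name is illustrative):

    #include <linux/io.h>

    /* Pulse an active-high reset bit behind a memory-mapped register.
     * The read-back flushes the posted assert write so the device is
     * guaranteed to have seen it before the deassert lands.
     */
    static void pulse_reset(void __iomem *rstctrl)
    {
            writel_relaxed(1, rstctrl);     /* assert reset */
            readl_relaxed(rstctrl);         /* flush the posted write */
            writel_relaxed(0, rstctrl);     /* deassert */
    }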
@@ -281,6 +350,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
     /* Must preserve cfg.scu_base set earlier */
     cfg.cpu1_rstctrl_pa = c->cpu1_rstctrl_pa;
     cfg.startup_addr = c->startup_addr;
+    cfg.wakeupgen_base = omap_get_wakeupgen_base();
 
     if (soc_is_dra74x() || soc_is_omap54xx()) {
         if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
@@ -299,15 +369,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
     if (cfg.scu_base)
         scu_enable(cfg.scu_base);
 
-    /*
-     * Reset CPU1 before configuring, otherwise kexec will
-     * end up trying to use old kernel startup address.
-     */
-    if (cfg.cpu1_rstctrl_va) {
-        writel_relaxed(1, cfg.cpu1_rstctrl_va);
-        readl_relaxed(cfg.cpu1_rstctrl_va);
-        writel_relaxed(0, cfg.cpu1_rstctrl_va);
-    }
+    omap4_smp_maybe_reset_cpu1(&cfg);
 
     /*
      * Write the address of secondary startup routine into the
@@ -319,7 +381,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
         omap_auxcoreboot_addr(__pa_symbol(cfg.startup_addr));
     else
         writel_relaxed(__pa_symbol(cfg.startup_addr),
-                       base + OMAP_AUX_CORE_BOOT_1);
+                       cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
 }
 
 const struct smp_operations omap4_smp_ops __initconst = {
@@ -222,6 +222,14 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
             dev_err(dev, "failed to idle\n");
         }
         break;
+    case BUS_NOTIFY_BIND_DRIVER:
+        od = to_omap_device(pdev);
+        if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) &&
+            pm_runtime_status_suspended(dev)) {
+            od->_driver_status = BUS_NOTIFY_BIND_DRIVER;
+            pm_runtime_set_active(dev);
+        }
+        break;
     case BUS_NOTIFY_ADD_DEVICE:
         if (pdev->dev.of_node)
             omap_device_build_from_dt(pdev);
@@ -6,6 +6,7 @@ menuconfig ARCH_ORION5X
     select GPIOLIB
     select MVEBU_MBUS
     select PCI
+    select PHYLIB if NETDEVICES
     select PLAT_ORION_LEGACY
     help
       Support for the following Marvell Orion 5x series SoCs:
@@ -468,6 +468,7 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
                 eth_data, &orion_ge11);
 }
 
+#ifdef CONFIG_ARCH_ORION5X
 /*****************************************************************************
  * Ethernet switch
  ****************************************************************************/
@@ -480,6 +481,9 @@ void __init orion_ge00_switch_init(struct dsa_chip_data *d)
     struct mdio_board_info *bd;
     unsigned int i;
 
+    if (!IS_BUILTIN(CONFIG_PHYLIB))
+        return;
+
     for (i = 0; i < ARRAY_SIZE(d->port_names); i++)
         if (!strcmp(d->port_names[i], "cpu"))
             break;
@@ -493,6 +497,7 @@ void __init orion_ge00_switch_init(struct dsa_chip_data *d)
 
     mdiobus_register_board_info(&orion_ge00_switch_board_info, 1);
 }
+#endif
 
 /*****************************************************************************
  * I2C
@@ -179,8 +179,10 @@
 usbphy: phy@01c19400 {
     compatible = "allwinner,sun50i-a64-usb-phy";
     reg = <0x01c19400 0x14>,
+          <0x01c1a800 0x4>,
           <0x01c1b800 0x4>;
     reg-names = "phy_ctrl",
+                "pmu0",
                 "pmu1";
     clocks = <&ccu CLK_USB_PHY0>,
              <&ccu CLK_USB_PHY1>;
@@ -39,10 +39,10 @@
 #define get_user __get_user
 
 #if !defined(CONFIG_64BIT)
-#define LDD_USER(ptr)		__get_user_asm64(ptr)
+#define LDD_USER(val, ptr)	__get_user_asm64(val, ptr)
 #define STD_USER(x, ptr)	__put_user_asm64(x, ptr)
 #else
-#define LDD_USER(ptr)		__get_user_asm("ldd", ptr)
+#define LDD_USER(val, ptr)	__get_user_asm(val, "ldd", ptr)
 #define STD_USER(x, ptr)	__put_user_asm("std", x, ptr)
 #endif
 
|
|||||||
" mtsp %0,%%sr2\n\t" \
|
" mtsp %0,%%sr2\n\t" \
|
||||||
: : "r"(get_fs()) : )
|
: : "r"(get_fs()) : )
|
||||||
|
|
||||||
#define __get_user(x, ptr) \
|
#define __get_user_internal(val, ptr) \
|
||||||
({ \
|
({ \
|
||||||
register long __gu_err __asm__ ("r8") = 0; \
|
register long __gu_err __asm__ ("r8") = 0; \
|
||||||
register long __gu_val; \
|
\
|
||||||
\
|
switch (sizeof(*(ptr))) { \
|
||||||
load_sr2(); \
|
case 1: __get_user_asm(val, "ldb", ptr); break; \
|
||||||
switch (sizeof(*(ptr))) { \
|
case 2: __get_user_asm(val, "ldh", ptr); break; \
|
||||||
case 1: __get_user_asm("ldb", ptr); break; \
|
case 4: __get_user_asm(val, "ldw", ptr); break; \
|
||||||
case 2: __get_user_asm("ldh", ptr); break; \
|
case 8: LDD_USER(val, ptr); break; \
|
||||||
case 4: __get_user_asm("ldw", ptr); break; \
|
default: BUILD_BUG(); \
|
||||||
case 8: LDD_USER(ptr); break; \
|
} \
|
||||||
default: BUILD_BUG(); break; \
|
\
|
||||||
} \
|
__gu_err; \
|
||||||
\
|
|
||||||
(x) = (__force __typeof__(*(ptr))) __gu_val; \
|
|
||||||
__gu_err; \
|
|
||||||
})
|
})
|
||||||
|
|
||||||
#define __get_user_asm(ldx, ptr) \
|
#define __get_user(val, ptr) \
|
||||||
|
({ \
|
||||||
|
load_sr2(); \
|
||||||
|
__get_user_internal(val, ptr); \
|
||||||
|
})
|
||||||
|
|
||||||
|
#define __get_user_asm(val, ldx, ptr) \
|
||||||
|
{ \
|
||||||
|
register long __gu_val; \
|
||||||
|
\
|
||||||
__asm__("1: " ldx " 0(%%sr2,%2),%0\n" \
|
__asm__("1: " ldx " 0(%%sr2,%2),%0\n" \
|
||||||
"9:\n" \
|
"9:\n" \
|
||||||
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
|
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
|
||||||
: "=r"(__gu_val), "=r"(__gu_err) \
|
: "=r"(__gu_val), "=r"(__gu_err) \
|
||||||
: "r"(ptr), "1"(__gu_err));
|
: "r"(ptr), "1"(__gu_err)); \
|
||||||
|
\
|
||||||
|
(val) = (__force __typeof__(*(ptr))) __gu_val; \
|
||||||
|
}
|
||||||
|
|
||||||
#if !defined(CONFIG_64BIT)
|
#if !defined(CONFIG_64BIT)
|
||||||
|
|
||||||
#define __get_user_asm64(ptr) \
|
#define __get_user_asm64(val, ptr) \
|
||||||
|
{ \
|
||||||
|
union { \
|
||||||
|
unsigned long long l; \
|
||||||
|
__typeof__(*(ptr)) t; \
|
||||||
|
} __gu_tmp; \
|
||||||
|
\
|
||||||
__asm__(" copy %%r0,%R0\n" \
|
__asm__(" copy %%r0,%R0\n" \
|
||||||
"1: ldw 0(%%sr2,%2),%0\n" \
|
"1: ldw 0(%%sr2,%2),%0\n" \
|
||||||
"2: ldw 4(%%sr2,%2),%R0\n" \
|
"2: ldw 4(%%sr2,%2),%R0\n" \
|
||||||
"9:\n" \
|
"9:\n" \
|
||||||
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
|
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
|
||||||
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
|
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
|
||||||
: "=r"(__gu_val), "=r"(__gu_err) \
|
: "=&r"(__gu_tmp.l), "=r"(__gu_err) \
|
||||||
: "r"(ptr), "1"(__gu_err));
|
: "r"(ptr), "1"(__gu_err)); \
|
||||||
|
\
|
||||||
|
(val) = __gu_tmp.t; \
|
||||||
|
}
|
||||||
|
|
||||||
#endif /* !defined(CONFIG_64BIT) */
|
#endif /* !defined(CONFIG_64BIT) */
|
||||||
|
|
||||||
|
|
||||||
#define __put_user(x, ptr) \
|
#define __put_user_internal(x, ptr) \
|
||||||
({ \
|
({ \
|
||||||
register long __pu_err __asm__ ("r8") = 0; \
|
register long __pu_err __asm__ ("r8") = 0; \
|
||||||
__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \
|
__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \
|
||||||
\
|
\
|
||||||
load_sr2(); \
|
|
||||||
switch (sizeof(*(ptr))) { \
|
switch (sizeof(*(ptr))) { \
|
||||||
case 1: __put_user_asm("stb", __x, ptr); break; \
|
case 1: __put_user_asm("stb", __x, ptr); break; \
|
||||||
case 2: __put_user_asm("sth", __x, ptr); break; \
|
case 2: __put_user_asm("sth", __x, ptr); break; \
|
||||||
case 4: __put_user_asm("stw", __x, ptr); break; \
|
case 4: __put_user_asm("stw", __x, ptr); break; \
|
||||||
case 8: STD_USER(__x, ptr); break; \
|
case 8: STD_USER(__x, ptr); break; \
|
||||||
default: BUILD_BUG(); break; \
|
default: BUILD_BUG(); \
|
||||||
} \
|
} \
|
||||||
\
|
\
|
||||||
__pu_err; \
|
__pu_err; \
|
||||||
})
|
})
|
||||||
|
|
||||||
|
#define __put_user(x, ptr) \
|
||||||
|
({ \
|
||||||
|
load_sr2(); \
|
||||||
|
__put_user_internal(x, ptr); \
|
||||||
|
})
|
||||||
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The "__put_user/kernel_asm()" macros tell gcc they read from memory
|
* The "__put_user/kernel_asm()" macros tell gcc they read from memory
|
||||||
* instead of writing. This is because they do not write to any memory
|
* instead of writing. This is because they do not write to any memory
|
||||||
|
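The union in the new __get_user_asm64() is the load-bearing part of this rework: the old macro funneled a 64-bit load through a 32-bit `register long __gu_val`, silently truncating the value on 32-bit kernels. The trick is easy to reproduce in plain user-space C (the names below are illustrative, not the kernel's):

    #include <stdio.h>
    #include <string.h>

    /* The asm fills both 32-bit halves of 'l'; reading back through
     * 't' hands the caller a value with the type of *ptr instead of
     * one squeezed through a 32-bit long.
     */
    #define get_user_64(val, ptr)                       \
    do {                                                \
            union {                                     \
                    unsigned long long l;               \
                    __typeof__(*(ptr)) t;               \
            } tmp;                                      \
            memcpy(&tmp.l, (ptr), sizeof(tmp.l));       \
            (val) = tmp.t;                              \
    } while (0)

    int main(void)
    {
            unsigned long long src = 0x1122334455667788ULL, dst = 0;

            get_user_64(dst, &src);
            printf("0x%llx\n", dst);    /* 0x1122334455667788, untruncated */
            return 0;
    }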
@@ -201,7 +201,7 @@ ENTRY_CFI(pa_memcpy)
 	add	dst,len,end
 
 	/* short copy with less than 16 bytes? */
-	cmpib,>>=,n 15,len,.Lbyte_loop
+	cmpib,COND(>>=),n 15,len,.Lbyte_loop
 
 	/* same alignment? */
 	xor	src,dst,t0
@@ -216,7 +216,7 @@ ENTRY_CFI(pa_memcpy)
 	/* loop until we are 64-bit aligned */
 .Lalign_loop64:
 	extru	dst,31,3,t1
-	cmpib,=,n	0,t1,.Lcopy_loop_16
+	cmpib,=,n	0,t1,.Lcopy_loop_16_start
 20:	ldb,ma	1(srcspc,src),t1
 21:	stb,ma	t1,1(dstspc,dst)
 	b	.Lalign_loop64
@@ -225,6 +225,7 @@ ENTRY_CFI(pa_memcpy)
 	ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
 	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
 
+.Lcopy_loop_16_start:
 	ldi	31,t0
 .Lcopy_loop_16:
 	cmpb,COND(>>=),n t0,len,.Lword_loop
@@ -267,7 +268,7 @@ ENTRY_CFI(pa_memcpy)
 	/* loop until we are 32-bit aligned */
 .Lalign_loop32:
 	extru	dst,31,2,t1
-	cmpib,=,n	0,t1,.Lcopy_loop_4
+	cmpib,=,n	0,t1,.Lcopy_loop_8
 20:	ldb,ma	1(srcspc,src),t1
 21:	stb,ma	t1,1(dstspc,dst)
 	b	.Lalign_loop32
@@ -277,7 +278,7 @@ ENTRY_CFI(pa_memcpy)
 	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
 
 
-.Lcopy_loop_4:
+.Lcopy_loop_8:
 	cmpib,COND(>>=),n 15,len,.Lbyte_loop
 
 10:	ldw	0(srcspc,src),t1
@@ -299,7 +300,7 @@ ENTRY_CFI(pa_memcpy)
 	ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
 	ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
 
-	b	.Lcopy_loop_4
+	b	.Lcopy_loop_8
 	ldo	-16(len),len
 
 .Lbyte_loop:
@@ -324,7 +325,7 @@ ENTRY_CFI(pa_memcpy)
 .Lunaligned_copy:
 	/* align until dst is 32bit-word-aligned */
 	extru	dst,31,2,t1
-	cmpib,COND(=),n	0,t1,.Lcopy_dstaligned
+	cmpib,=,n	0,t1,.Lcopy_dstaligned
 20:	ldb	0(srcspc,src),t1
 	ldo	1(src),src
 21:	stb,ma	t1,1(dstspc,dst)
@@ -362,7 +363,7 @@ ENTRY_CFI(pa_memcpy)
 	cmpiclr,<> 1,t0,%r0
 	b,n	.Lcase1
 .Lcase0:
-	cmpb,=	%r0,len,.Lcda_finish
+	cmpb,COND(=) %r0,len,.Lcda_finish
 	nop
 
 1:	ldw,ma	4(srcspc,src), a3
@@ -376,7 +377,7 @@ ENTRY_CFI(pa_memcpy)
 1:	ldw,ma	4(srcspc,src), a3
 	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
 	ldo	-1(len),len
-	cmpb,=,n %r0,len,.Ldo0
+	cmpb,COND(=),n %r0,len,.Ldo0
 .Ldo4:
 1:	ldw,ma	4(srcspc,src), a0
 	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
@@ -402,7 +403,7 @@ ENTRY_CFI(pa_memcpy)
 1:	stw,ma	t0, 4(dstspc,dst)
 	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
 	ldo	-4(len),len
-	cmpb,<>	%r0,len,.Ldo4
+	cmpb,COND(<>) %r0,len,.Ldo4
 	nop
 .Ldo0:
 	shrpw	a2, a3, %sar, t0
@@ -436,14 +437,14 @@ ENTRY_CFI(pa_memcpy)
 	/* fault exception fixup handlers: */
 #ifdef CONFIG_64BIT
 .Lcopy16_fault:
-10:	b	.Lcopy_done
-	std,ma	t1,8(dstspc,dst)
+	b	.Lcopy_done
+10:	std,ma	t1,8(dstspc,dst)
 	ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
 #endif
 
 .Lcopy8_fault:
-10:	b	.Lcopy_done
-	stw,ma	t1,4(dstspc,dst)
+	b	.Lcopy_done
+10:	stw,ma	t1,4(dstspc,dst)
 	ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
 
 	.exit
@@ -43,7 +43,7 @@ config SPARC
     select ARCH_HAS_SG_CHAIN
     select CPU_NO_EFFICIENT_FFS
     select HAVE_ARCH_HARDENED_USERCOPY
-    select PROVE_LOCKING_SMALL if PROVE_LOCKING
+    select LOCKDEP_SMALL if LOCKDEP
     select ARCH_WANT_RELAX_ORDER
 
 config SPARC32
@@ -461,6 +461,22 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
     pgd_t *pgd;
     unsigned long next;
 
+    addr &= PMD_MASK;
+    if (addr < floor) {
+        addr += PMD_SIZE;
+        if (!addr)
+            return;
+    }
+    if (ceiling) {
+        ceiling &= PMD_MASK;
+        if (!ceiling)
+            return;
+    }
+    if (end - 1 > ceiling - 1)
+        end -= PMD_SIZE;
+    if (addr > end - 1)
+        return;
+
     pgd = pgd_offset(tlb->mm, addr);
     do {
         next = pgd_addr_end(addr, end);
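This prologue mirrors the floor/ceiling clamping done by the generic free_pgd_range(): round the start up past `floor`, round `ceiling` down, and give up once the clamped window collapses, so page-table pages still reachable from a neighbouring mapping are never freed. The arithmetic can be checked in isolation (the PMD geometry below is made up for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    #define PMD_SIZE (1UL << 23)            /* illustrative value */
    #define PMD_MASK (~(PMD_SIZE - 1))

    /* Returns false when no full PMD in [addr, end) may be freed. */
    static bool clamp_range(unsigned long *addr, unsigned long *end,
                            unsigned long floor, unsigned long ceiling)
    {
            *addr &= PMD_MASK;
            if (*addr < floor) {
                    *addr += PMD_SIZE;
                    if (!*addr)             /* wrapped past the top */
                            return false;
            }
            if (ceiling) {
                    ceiling &= PMD_MASK;
                    if (!ceiling)
                            return false;
            }
            if (*end - 1 > ceiling - 1)     /* ceiling == 0 means "none" */
                    *end -= PMD_SIZE;
            return *addr <= *end - 1;
    }

    int main(void)
    {
            unsigned long a = 0x900000, e = 0x3000000;

            printf("%d (%#lx..%#lx)\n",
                   clamp_range(&a, &e, 0xa00000, 0x2000000), a, e);
            return 0;
    }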
@@ -55,7 +55,8 @@ static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n)
  * @size: number of bytes to write back
  *
  * Write back a cache range using the CLWB (cache line write back)
- * instruction.
+ * instruction. Note that @size is internally rounded up to be cache
+ * line size aligned.
  */
 static inline void arch_wb_cache_pmem(void *addr, size_t size)
 {
@@ -69,15 +70,6 @@ static inline void arch_wb_cache_pmem(void *addr, size_t size)
         clwb(p);
 }
 
-/*
- * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
- * iterators, so for other types (bvec & kvec) we must do a cache write-back.
- */
-static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
-{
-    return iter_is_iovec(i) == false;
-}
-
 /**
  * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
  * @addr: PMEM destination address
@@ -94,7 +86,35 @@ static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
     /* TODO: skip the write-back by always using non-temporal stores */
     len = copy_from_iter_nocache(addr, bytes, i);
 
-    if (__iter_needs_pmem_wb(i))
+    /*
+     * In the iovec case on x86_64 copy_from_iter_nocache() uses
+     * non-temporal stores for the bulk of the transfer, but we need
+     * to manually flush if the transfer is unaligned. A cached
+     * memory copy is used when destination or size is not naturally
+     * aligned. That is:
+     *   - Require 8-byte alignment when size is 8 bytes or larger.
+     *   - Require 4-byte alignment when size is 4 bytes.
+     *
+     * In the non-iovec case the entire destination needs to be
+     * flushed.
+     */
+    if (iter_is_iovec(i)) {
+        unsigned long flushed, dest = (unsigned long) addr;
+
+        if (bytes < 8) {
+            if (!IS_ALIGNED(dest, 4) || (bytes != 4))
+                arch_wb_cache_pmem(addr, 1);
+        } else {
+            if (!IS_ALIGNED(dest, 8)) {
+                dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
+                arch_wb_cache_pmem(addr, 1);
+            }
+
+            flushed = dest - (unsigned long) addr;
+            if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
+                arch_wb_cache_pmem(addr + bytes - 1, 1);
+        }
+    } else
         arch_wb_cache_pmem(addr, bytes);
 
     return len;
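The rewritten flush logic relies on copy_from_iter_nocache() using non-temporal stores only for the aligned 8-byte middle of an iovec copy, so just the ragged head and tail can be left dirty in cache. The decision table is small enough to exercise in user space (IS_ALIGNED/ALIGN spelled out locally and the flush stubbed; a sketch, not the kernel's code):

    #include <stdio.h>

    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)
    #define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    static void flush(unsigned long addr)   /* arch_wb_cache_pmem() stand-in */
    {
            printf("flush line at %#lx\n", addr);
    }

    static void flush_unaligned(unsigned long dest, unsigned long bytes)
    {
            unsigned long start = dest, flushed;

            if (bytes < 8) {        /* short copies go cached unless 4 @ 4 */
                    if (!IS_ALIGNED(dest, 4) || bytes != 4)
                            flush(dest);
                    return;
            }
            if (!IS_ALIGNED(dest, 8)) {
                    flush(dest);                /* ragged head */
                    dest = ALIGN_UP(dest, 64);  /* assume 64-byte lines */
            }
            flushed = dest - start;
            if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
                    flush(start + bytes - 1);   /* ragged tail */
    }

    int main(void)
    {
            flush_unaligned(0x1003, 21);    /* unaligned head and tail */
            return 0;
    }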
@@ -32,6 +32,7 @@ struct ahash_request_priv {
     crypto_completion_t complete;
     void *data;
     u8 *result;
+    u32 flags;
     void *ubuf[] CRYPTO_MINALIGN_ATTR;
 };
 
@@ -253,6 +254,8 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
     priv->result = req->result;
     priv->complete = req->base.complete;
     priv->data = req->base.data;
+    priv->flags = req->base.flags;
+
     /*
      * WARNING: We do not backup req->priv here! The req->priv
      * is for internal use of the Crypto API and the
@@ -267,38 +270,44 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
     return 0;
 }
 
-static void ahash_restore_req(struct ahash_request *req)
+static void ahash_restore_req(struct ahash_request *req, int err)
 {
     struct ahash_request_priv *priv = req->priv;
 
+    if (!err)
+        memcpy(priv->result, req->result,
+               crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+
     /* Restore the original crypto request. */
     req->result = priv->result;
-    req->base.complete = priv->complete;
-    req->base.data = priv->data;
+
+    ahash_request_set_callback(req, priv->flags,
+                               priv->complete, priv->data);
     req->priv = NULL;
 
     /* Free the req->priv.priv from the ADJUSTED request. */
     kzfree(priv);
 }
 
-static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
+static void ahash_notify_einprogress(struct ahash_request *req)
 {
     struct ahash_request_priv *priv = req->priv;
+    struct crypto_async_request oreq;
 
-    if (err == -EINPROGRESS)
-        return;
-
-    if (!err)
-        memcpy(priv->result, req->result,
-               crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+    oreq.data = priv->data;
 
-    ahash_restore_req(req);
+    priv->complete(&oreq, -EINPROGRESS);
 }
 
 static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
 {
     struct ahash_request *areq = req->data;
 
+    if (err == -EINPROGRESS) {
+        ahash_notify_einprogress(areq);
+        return;
+    }
+
     /*
      * Restore the original request, see ahash_op_unaligned() for what
      * goes where.
@@ -309,7 +318,7 @@ static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
      */
 
     /* First copy req->result into req->priv.result */
-    ahash_op_unaligned_finish(areq, err);
+    ahash_restore_req(areq, err);
 
     /* Complete the ORIGINAL request. */
     areq->base.complete(&areq->base, err);
@@ -325,7 +334,12 @@ static int ahash_op_unaligned(struct ahash_request *req,
         return err;
 
     err = op(req);
-    ahash_op_unaligned_finish(req, err);
+    if (err == -EINPROGRESS ||
+        (err == -EBUSY && (ahash_request_flags(req) &
+                           CRYPTO_TFM_REQ_MAY_BACKLOG)))
+        return err;
+
+    ahash_restore_req(req, err);
 
     return err;
 }
@@ -360,25 +374,14 @@ int crypto_ahash_digest(struct ahash_request *req)
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_digest);
 
-static void ahash_def_finup_finish2(struct ahash_request *req, int err)
-{
-    struct ahash_request_priv *priv = req->priv;
-
-    if (err == -EINPROGRESS)
-        return;
-
-    if (!err)
-        memcpy(priv->result, req->result,
-               crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
-
-    ahash_restore_req(req);
-}
-
 static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
 {
     struct ahash_request *areq = req->data;
 
-    ahash_def_finup_finish2(areq, err);
+    if (err == -EINPROGRESS)
+        return;
+
+    ahash_restore_req(areq, err);
 
     areq->base.complete(&areq->base, err);
 }
@@ -389,11 +392,15 @@ static int ahash_def_finup_finish1(struct ahash_request *req, int err)
         goto out;
 
     req->base.complete = ahash_def_finup_done2;
-    req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
     err = crypto_ahash_reqtfm(req)->final(req);
+    if (err == -EINPROGRESS ||
+        (err == -EBUSY && (ahash_request_flags(req) &
+                           CRYPTO_TFM_REQ_MAY_BACKLOG)))
+        return err;
 
 out:
-    ahash_def_finup_finish2(req, err);
+    ahash_restore_req(req, err);
     return err;
 }
 
@@ -401,7 +408,16 @@ static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
 {
     struct ahash_request *areq = req->data;
 
+    if (err == -EINPROGRESS) {
+        ahash_notify_einprogress(areq);
+        return;
+    }
+
+    areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
     err = ahash_def_finup_finish1(areq, err);
+    if (areq->priv)
+        return;
 
     areq->base.complete(&areq->base, err);
 }
@@ -416,6 +432,11 @@ static int ahash_def_finup(struct ahash_request *req)
         return err;
 
     err = tfm->update(req);
+    if (err == -EINPROGRESS ||
+        (err == -EBUSY && (ahash_request_flags(req) &
+                           CRYPTO_TFM_REQ_MAY_BACKLOG)))
+        return err;
+
     return ahash_def_finup_finish1(req, err);
 }
 
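The thread running through all of these ahash hunks: a request that is moved off the crypto backlog completes once with -EINPROGRESS and then again with the real result, so any saved state must survive the first callback. The old code restored and freed state on the first notification, a use-after-free. The shape of the fix, reduced to a runnable toy (all names illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    #define EINPROGRESS_ERR (-115)          /* mirrors -EINPROGRESS */

    struct req {
            void (*complete)(struct req *, int);
            int *saved;                     /* ahash_request_priv stand-in */
    };

    static void user_cb(struct req *r, int err)
    {
            printf("user callback, err=%d\n", err);
    }

    static void driver_done(struct req *r, int err)
    {
            if (err == EINPROGRESS_ERR) {
                    /* First notification: forward it, keep r->saved alive. */
                    r->complete(r, err);
                    return;
            }
            free(r->saved);                 /* safe only on final completion */
            r->saved = NULL;
            r->complete(r, err);
    }

    int main(void)
    {
            struct req r = { user_cb, malloc(sizeof(int)) };

            driver_done(&r, EINPROGRESS_ERR);   /* request left the backlog */
            driver_done(&r, 0);                 /* real completion */
            return 0;
    }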
@@ -40,6 +40,7 @@ struct aead_async_req {
     struct aead_async_rsgl first_rsgl;
     struct list_head list;
     struct kiocb *iocb;
+    struct sock *sk;
     unsigned int tsgls;
     char iv[];
 };
@@ -379,12 +380,10 @@ unlock:
 
 static void aead_async_cb(struct crypto_async_request *_req, int err)
 {
-    struct sock *sk = _req->data;
-    struct alg_sock *ask = alg_sk(sk);
-    struct aead_ctx *ctx = ask->private;
-    struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
-    struct aead_request *req = aead_request_cast(_req);
+    struct aead_request *req = _req->data;
+    struct crypto_aead *tfm = crypto_aead_reqtfm(req);
     struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
+    struct sock *sk = areq->sk;
     struct scatterlist *sg = areq->tsgl;
     struct aead_async_rsgl *rsgl;
     struct kiocb *iocb = areq->iocb;
@@ -447,11 +446,12 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
     memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
     INIT_LIST_HEAD(&areq->list);
     areq->iocb = msg->msg_iocb;
+    areq->sk = sk;
     memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
     aead_request_set_tfm(req, tfm);
     aead_request_set_ad(req, ctx->aead_assoclen);
     aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-                              aead_async_cb, sk);
+                              aead_async_cb, req);
     used -= ctx->aead_assoclen;
 
     /* take over all tx sgls from ctx */
crypto/lrw.c
@@ -345,6 +345,13 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
     struct rctx *rctx;
 
     rctx = skcipher_request_ctx(req);
 
+    if (err == -EINPROGRESS) {
+        if (rctx->left != req->cryptlen)
+            return;
+        goto out;
+    }
+
     subreq = &rctx->subreq;
     subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -352,6 +359,7 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
     if (rctx->left)
         return;
 
+out:
     skcipher_request_complete(req, err);
 }
 
@@ -389,6 +397,13 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
     struct rctx *rctx;
 
     rctx = skcipher_request_ctx(req);
 
+    if (err == -EINPROGRESS) {
+        if (rctx->left != req->cryptlen)
+            return;
+        goto out;
+    }
+
     subreq = &rctx->subreq;
     subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -396,6 +411,7 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
     if (rctx->left)
         return;
 
+out:
     skcipher_request_complete(req, err);
 }
 
crypto/xts.c
@@ -286,6 +286,13 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
     struct rctx *rctx;
 
     rctx = skcipher_request_ctx(req);
 
+    if (err == -EINPROGRESS) {
+        if (rctx->left != req->cryptlen)
+            return;
+        goto out;
+    }
+
     subreq = &rctx->subreq;
     subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -293,6 +300,7 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
     if (rctx->left)
         return;
 
+out:
     skcipher_request_complete(req, err);
 }
 
@@ -330,6 +338,13 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
     struct rctx *rctx;
 
     rctx = skcipher_request_ctx(req);
 
+    if (err == -EINPROGRESS) {
+        if (rctx->left != req->cryptlen)
+            return;
+        goto out;
+    }
+
     subreq = &rctx->subreq;
     subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -337,6 +352,7 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
     if (rctx->left)
         return;
 
+out:
     skcipher_request_complete(req, err);
 }
 
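In lrw and xts the same EINPROGRESS rule meets chained subrequests: only the notification for the first chunk, with rctx->left still equal to the full req->cryptlen, represents the request as a whole; notifications for later chunks must be swallowed or the parent would complete early. A one-line predicate captures the guard added above (a sketch, not the kernel's code):

    /* Forward -EINPROGRESS to the caller only for the first chunk:
     * 'left' counts bytes not yet submitted, so left == cryptlen
     * means no earlier subrequest has completed yet.
     */
    static int forward_einprogress(unsigned int left, unsigned int cryptlen)
    {
            return left == cryptlen;
    }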
@@ -1617,7 +1617,11 @@ static int cmp_map(const void *m0, const void *m1)
     const struct nfit_set_info_map *map0 = m0;
     const struct nfit_set_info_map *map1 = m1;
 
-    return map0->region_offset - map1->region_offset;
+    if (map0->region_offset < map1->region_offset)
+        return -1;
+    else if (map0->region_offset > map1->region_offset)
+        return 1;
+    return 0;
 }
 
 /* Retrieve the nth entry referencing this spa */
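The cmp_map() change fixes a classic sort-comparator bug: region_offset is a u64, so returning the subtraction truncates the difference to int, and two offsets that differ only above bit 31 compare as equal (or with the wrong sign). A user-space demonstration:

    #include <stdio.h>
    #include <stdint.h>

    static int cmp_broken(uint64_t a, uint64_t b)
    {
            return a - b;           /* u64 difference truncated to int */
    }

    static int cmp_fixed(uint64_t a, uint64_t b)
    {
            if (a < b)
                    return -1;
            if (a > b)
                    return 1;
            return 0;
    }

    int main(void)
    {
            uint64_t a = 0x100000000ULL, b = 0;  /* differ above bit 31 */

            printf("broken: %d, fixed: %d\n",
                   cmp_broken(a, b), cmp_fixed(a, b));  /* broken: 0, fixed: 1 */
            return 0;
    }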
@@ -2,6 +2,7 @@ menuconfig DEV_DAX
     tristate "DAX: direct access to differentiated memory"
     default m if NVDIMM_DAX
     depends on TRANSPARENT_HUGEPAGE
+    select SRCU
     help
       Support raw access to differentiated (persistence, bandwidth,
       latency...) memory via an mmap(2) capable character
@@ -25,6 +25,7 @@
 #include "dax.h"
 
 static dev_t dax_devt;
+DEFINE_STATIC_SRCU(dax_srcu);
 static struct class *dax_class;
 static DEFINE_IDA(dax_minor_ida);
 static int nr_dax = CONFIG_NR_DEV_DAX;
@@ -60,7 +61,7 @@ struct dax_region {
  * @region - parent region
  * @dev - device backing the character device
  * @cdev - core chardev data
- * @alive - !alive + rcu grace period == no new mappings can be established
+ * @alive - !alive + srcu grace period == no new mappings can be established
  * @id - child id in the region
  * @num_resources - number of physical address extents in this device
  * @res - array of physical address ranges
@@ -569,7 +570,7 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
 static int dax_dev_huge_fault(struct vm_fault *vmf,
                               enum page_entry_size pe_size)
 {
-    int rc;
+    int rc, id;
     struct file *filp = vmf->vma->vm_file;
     struct dax_dev *dax_dev = filp->private_data;
 
@@ -578,7 +579,7 @@ static int dax_dev_huge_fault(struct vm_fault *vmf,
             ? "write" : "read",
             vmf->vma->vm_start, vmf->vma->vm_end);
 
-    rcu_read_lock();
+    id = srcu_read_lock(&dax_srcu);
     switch (pe_size) {
     case PE_SIZE_PTE:
         rc = __dax_dev_pte_fault(dax_dev, vmf);
@@ -592,7 +593,7 @@ static int dax_dev_huge_fault(struct vm_fault *vmf,
     default:
         return VM_FAULT_FALLBACK;
     }
-    rcu_read_unlock();
+    srcu_read_unlock(&dax_srcu, id);
 
     return rc;
 }
@@ -713,11 +714,11 @@ static void unregister_dax_dev(void *dev)
      * Note, rcu is not protecting the liveness of dax_dev, rcu is
      * ensuring that any fault handlers that might have seen
      * dax_dev->alive == true, have completed. Any fault handlers
-     * that start after synchronize_rcu() has started will abort
+     * that start after synchronize_srcu() has started will abort
      * upon seeing dax_dev->alive == false.
      */
     dax_dev->alive = false;
-    synchronize_rcu();
+    synchronize_srcu(&dax_srcu);
     unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
     cdev_del(cdev);
     device_unregister(dev);
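rcu_read_lock() was the wrong primitive here because a DAX fault handler may sleep while resolving the fault, which is not allowed inside a classic RCU read-side section; SRCU permits sleeping readers at the cost of carrying a per-reader index. The read/retire pattern, roughly as used above (the device type and work function are illustrative):

    #include <linux/srcu.h>

    DEFINE_STATIC_SRCU(my_srcu);

    /* Reader: may sleep between lock and unlock. */
    static int reader(struct my_dev *dev)       /* hypothetical type */
    {
            int idx, ret = -ENXIO;

            idx = srcu_read_lock(&my_srcu);
            if (dev->alive)
                    ret = do_sleepy_work(dev);  /* hypothetical */
            srcu_read_unlock(&my_srcu, idx);
            return ret;
    }

    /* Teardown: flip the flag, then wait out every in-flight reader. */
    static void retire(struct my_dev *dev)
    {
            dev->alive = false;
            synchronize_srcu(&my_srcu);
            /* no reader can still be acting on dev->alive == true */
    }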
@@ -134,6 +134,7 @@ static void set_max_bgx_per_node(struct pci_dev *pdev)
     pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
     switch (sdevid) {
     case PCI_SUBSYS_DEVID_81XX_BGX:
+    case PCI_SUBSYS_DEVID_81XX_RGX:
         max_bgx_per_node = MAX_BGX_PER_CN81XX;
         break;
     case PCI_SUBSYS_DEVID_83XX_BGX:
@@ -16,6 +16,7 @@
 /* Subsystem device IDs */
 #define PCI_SUBSYS_DEVID_88XX_BGX	0xA126
 #define PCI_SUBSYS_DEVID_81XX_BGX	0xA226
+#define PCI_SUBSYS_DEVID_81XX_RGX	0xA254
 #define PCI_SUBSYS_DEVID_83XX_BGX	0xA326
 
 #define MAX_BGX_THUNDER	8 /* Max 2 nodes, 4 per node */
@ -613,7 +613,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
|
|||||||
struct mtk_mac *mac = netdev_priv(dev);
|
struct mtk_mac *mac = netdev_priv(dev);
|
||||||
struct mtk_eth *eth = mac->hw;
|
struct mtk_eth *eth = mac->hw;
|
||||||
struct mtk_tx_dma *itxd, *txd;
|
struct mtk_tx_dma *itxd, *txd;
|
||||||
struct mtk_tx_buf *tx_buf;
|
struct mtk_tx_buf *itx_buf, *tx_buf;
|
||||||
dma_addr_t mapped_addr;
|
dma_addr_t mapped_addr;
|
||||||
unsigned int nr_frags;
|
unsigned int nr_frags;
|
||||||
int i, n_desc = 1;
|
int i, n_desc = 1;
|
||||||
@ -627,8 +627,8 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
|
|||||||
fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
|
fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
|
||||||
txd4 |= fport;
|
txd4 |= fport;
|
||||||
|
|
||||||
tx_buf = mtk_desc_to_tx_buf(ring, itxd);
|
itx_buf = mtk_desc_to_tx_buf(ring, itxd);
|
||||||
memset(tx_buf, 0, sizeof(*tx_buf));
|
memset(itx_buf, 0, sizeof(*itx_buf));
|
||||||
|
|
||||||
if (gso)
|
if (gso)
|
||||||
txd4 |= TX_DMA_TSO;
|
txd4 |= TX_DMA_TSO;
|
||||||
@ -647,9 +647,11 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
|
|||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
WRITE_ONCE(itxd->txd1, mapped_addr);
|
WRITE_ONCE(itxd->txd1, mapped_addr);
|
||||||
tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
|
itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
|
||||||
dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
|
itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
|
||||||
dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
|
MTK_TX_FLAGS_FPORT1;
|
||||||
|
dma_unmap_addr_set(itx_buf, dma_addr0, mapped_addr);
|
||||||
|
dma_unmap_len_set(itx_buf, dma_len0, skb_headlen(skb));
|
||||||
|
|
||||||
/* TX SG offload */
|
/* TX SG offload */
|
||||||
txd = itxd;
|
txd = itxd;
|
||||||
@ -685,11 +687,13 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
|
|||||||
last_frag * TX_DMA_LS0));
|
last_frag * TX_DMA_LS0));
|
||||||
WRITE_ONCE(txd->txd4, fport);
|
WRITE_ONCE(txd->txd4, fport);
|
||||||
|
|
||||||
tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
|
|
||||||
tx_buf = mtk_desc_to_tx_buf(ring, txd);
|
tx_buf = mtk_desc_to_tx_buf(ring, txd);
|
||||||
memset(tx_buf, 0, sizeof(*tx_buf));
|
memset(tx_buf, 0, sizeof(*tx_buf));
|
||||||
|
tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
|
||||||
tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
|
tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
|
||||||
|
tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
|
||||||
|
MTK_TX_FLAGS_FPORT1;
|
||||||
|
|
||||||
dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
|
dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
|
||||||
dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
|
dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
|
||||||
frag_size -= frag_map_size;
|
frag_size -= frag_map_size;
|
||||||
@ -698,7 +702,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* store skb to cleanup */
|
/* store skb to cleanup */
|
||||||
tx_buf->skb = skb;
|
itx_buf->skb = skb;
|
||||||
|
|
||||||
WRITE_ONCE(itxd->txd4, txd4);
|
WRITE_ONCE(itxd->txd4, txd4);
|
||||||
WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
|
WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
|
||||||
@ -1012,17 +1016,16 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
|
|||||||
|
|
||||||
while ((cpu != dma) && budget) {
|
while ((cpu != dma) && budget) {
|
||||||
u32 next_cpu = desc->txd2;
|
u32 next_cpu = desc->txd2;
|
||||||
int mac;
|
int mac = 0;
|
||||||
|
|
||||||
desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
|
desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
|
||||||
if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
|
if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
|
||||||
break;
|
break;
|
||||||
|
|
||||||
mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
|
|
||||||
TX_DMA_FPORT_MASK;
|
|
||||||
mac--;
|
|
||||||
|
|
||||||
tx_buf = mtk_desc_to_tx_buf(ring, desc);
|
tx_buf = mtk_desc_to_tx_buf(ring, desc);
|
||||||
|
if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
|
||||||
|
mac = 1;
|
||||||
|
|
||||||
skb = tx_buf->skb;
|
skb = tx_buf->skb;
|
||||||
if (!skb) {
|
if (!skb) {
|
||||||
condition = 1;
|
condition = 1;
|
||||||
|
@@ -410,12 +410,18 @@ struct mtk_hw_stats {
 	struct u64_stats_sync	syncp;
 };
 
-/* PDMA descriptor can point at 1-2 segments. This enum allows us to track how
- * memory was allocated so that it can be freed properly
- */
 enum mtk_tx_flags {
+	/* PDMA descriptor can point at 1-2 segments. This enum allows us to
+	 * track how memory was allocated so that it can be freed properly.
+	 */
 	MTK_TX_FLAGS_SINGLE0	= 0x01,
 	MTK_TX_FLAGS_PAGE0	= 0x02,
+
+	/* MTK_TX_FLAGS_FPORTx allows tracking which port the transmitted
+	 * SKB out instead of looking up through hardware TX descriptor.
+	 */
+	MTK_TX_FLAGS_FPORT0	= 0x04,
+	MTK_TX_FLAGS_FPORT1	= 0x08,
 };
 
 /* This enum allows us to identify how the clock is defined on the array of the
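The two mediatek hunks above work together: the transmit path now records the egress MAC in the software-only MTK_TX_FLAGS_FPORTx bits, and mtk_poll_tx() recovers the port from tx_buf->flags instead of re-reading the FPORT field of txd4, which the hardware may have rewritten by completion time. A minimal userspace sketch of that round-trip; the struct and helper names are mocked for illustration, only the flag values match the header change above:

#include <assert.h>
#include <stdio.h>

#define MTK_TX_FLAGS_FPORT0	0x04
#define MTK_TX_FLAGS_FPORT1	0x08

struct tx_buf { unsigned int flags; };

/* xmit side: remember which MAC the frame left through */
static void record_port(struct tx_buf *b, int mac_id)
{
	b->flags |= !mac_id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
}

/* completion side: recover the port without trusting the descriptor */
static int recover_port(const struct tx_buf *b)
{
	return (b->flags & MTK_TX_FLAGS_FPORT1) ? 1 : 0;
}

int main(void)
{
	struct tx_buf b = { 0 };

	record_port(&b, 1);
	assert(recover_port(&b) == 1);
	printf("recovered mac %d from software flags\n", recover_port(&b));
	return 0;
}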
@@ -1127,12 +1127,70 @@ static struct mdiobb_ops bb_ops = {
 	.get_mdio_data = sh_get_mdio,
 };
 
+/* free Tx skb function */
+static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	struct sh_eth_txdesc *txdesc;
+	int free_num = 0;
+	int entry;
+	bool sent;
+
+	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
+		entry = mdp->dirty_tx % mdp->num_tx_ring;
+		txdesc = &mdp->tx_ring[entry];
+		sent = !(txdesc->status & cpu_to_le32(TD_TACT));
+		if (sent_only && !sent)
+			break;
+		/* TACT bit must be checked before all the following reads */
+		dma_rmb();
+		netif_info(mdp, tx_done, ndev,
+			   "tx entry %d status 0x%08x\n",
+			   entry, le32_to_cpu(txdesc->status));
+		/* Free the original skb. */
+		if (mdp->tx_skbuff[entry]) {
+			dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
+					 le32_to_cpu(txdesc->len) >> 16,
+					 DMA_TO_DEVICE);
+			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
+			mdp->tx_skbuff[entry] = NULL;
+			free_num++;
+		}
+		txdesc->status = cpu_to_le32(TD_TFP);
+		if (entry >= mdp->num_tx_ring - 1)
+			txdesc->status |= cpu_to_le32(TD_TDLE);
+
+		if (sent) {
+			ndev->stats.tx_packets++;
+			ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
+		}
+	}
+	return free_num;
+}
+
 /* free skb and descriptor buffer */
 static void sh_eth_ring_free(struct net_device *ndev)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 	int ringsize, i;
 
+	if (mdp->rx_ring) {
+		for (i = 0; i < mdp->num_rx_ring; i++) {
+			if (mdp->rx_skbuff[i]) {
+				struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
+
+				dma_unmap_single(&ndev->dev,
+						 le32_to_cpu(rxdesc->addr),
+						 ALIGN(mdp->rx_buf_sz, 32),
+						 DMA_FROM_DEVICE);
+			}
+		}
+		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
+		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
+				  mdp->rx_desc_dma);
+		mdp->rx_ring = NULL;
+	}
+
 	/* Free Rx skb ringbuffer */
 	if (mdp->rx_skbuff) {
 		for (i = 0; i < mdp->num_rx_ring; i++)
@@ -1141,27 +1199,18 @@ static void sh_eth_ring_free(struct net_device *ndev)
 	kfree(mdp->rx_skbuff);
 	mdp->rx_skbuff = NULL;
 
-	/* Free Tx skb ringbuffer */
-	if (mdp->tx_skbuff) {
-		for (i = 0; i < mdp->num_tx_ring; i++)
-			dev_kfree_skb(mdp->tx_skbuff[i]);
-	}
-	kfree(mdp->tx_skbuff);
-	mdp->tx_skbuff = NULL;
-
-	if (mdp->rx_ring) {
-		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
-		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
-				  mdp->rx_desc_dma);
-		mdp->rx_ring = NULL;
-	}
-
 	if (mdp->tx_ring) {
+		sh_eth_tx_free(ndev, false);
+
 		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
 		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
 				  mdp->tx_desc_dma);
 		mdp->tx_ring = NULL;
 	}
+
+	/* Free Tx skb ringbuffer */
+	kfree(mdp->tx_skbuff);
+	mdp->tx_skbuff = NULL;
 }
 
 /* format skb and descriptor buffer */
@@ -1409,43 +1458,6 @@ static void sh_eth_dev_exit(struct net_device *ndev)
 	update_mac_address(ndev);
 }
 
-/* free Tx skb function */
-static int sh_eth_txfree(struct net_device *ndev)
-{
-	struct sh_eth_private *mdp = netdev_priv(ndev);
-	struct sh_eth_txdesc *txdesc;
-	int free_num = 0;
-	int entry;
-
-	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
-		entry = mdp->dirty_tx % mdp->num_tx_ring;
-		txdesc = &mdp->tx_ring[entry];
-		if (txdesc->status & cpu_to_le32(TD_TACT))
-			break;
-		/* TACT bit must be checked before all the following reads */
-		dma_rmb();
-		netif_info(mdp, tx_done, ndev,
-			   "tx entry %d status 0x%08x\n",
-			   entry, le32_to_cpu(txdesc->status));
-		/* Free the original skb. */
-		if (mdp->tx_skbuff[entry]) {
-			dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
-					 le32_to_cpu(txdesc->len) >> 16,
-					 DMA_TO_DEVICE);
-			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
-			mdp->tx_skbuff[entry] = NULL;
-			free_num++;
-		}
-		txdesc->status = cpu_to_le32(TD_TFP);
-		if (entry >= mdp->num_tx_ring - 1)
-			txdesc->status |= cpu_to_le32(TD_TDLE);
-
-		ndev->stats.tx_packets++;
-		ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
-	}
-	return free_num;
-}
-
 /* Packet receive function */
 static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 {
@@ -1690,7 +1702,7 @@ static void sh_eth_error(struct net_device *ndev, u32 intr_status)
 			   intr_status, mdp->cur_tx, mdp->dirty_tx,
 			   (u32)ndev->state, edtrr);
 		/* dirty buffer free */
-		sh_eth_txfree(ndev);
+		sh_eth_tx_free(ndev, true);
 
 		/* SH7712 BUG */
 		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
@@ -1751,7 +1763,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
 		/* Clear Tx interrupts */
 		sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
 
-		sh_eth_txfree(ndev);
+		sh_eth_tx_free(ndev, true);
 		netif_wake_queue(ndev);
 	}
 
@@ -2412,7 +2424,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
 	spin_lock_irqsave(&mdp->lock, flags);
 	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
		if (!sh_eth_tx_free(ndev, true)) {
-		if (!sh_eth_txfree(ndev)) {
+		if (!sh_eth_tx_free(ndev, true)) {
 			netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
 			netif_stop_queue(ndev);
 			spin_unlock_irqrestore(&mdp->lock, flags);
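The sh_eth hunks fold the old sh_eth_txfree() and the ring-teardown free into one sh_eth_tx_free(ndev, sent_only): with sent_only true (interrupt and xmit paths) it stops at the first descriptor the hardware still owns, with sent_only false (sh_eth_ring_free()) it reclaims everything, and statistics are bumped only for frames that actually went out. A runnable userspace mock of the sent_only semantics; the TD_TACT value and ring contents are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

#define TD_TACT	0x80000000u	/* hardware still owns the descriptor */

static unsigned int ring[4] = { 0, 0, TD_TACT, TD_TACT };

static int tx_free(bool sent_only)
{
	int i, freed = 0;

	for (i = 0; i < 4; i++) {
		bool sent = !(ring[i] & TD_TACT);

		if (sent_only && !sent)
			break;		/* IRQ path stops at pending work */
		freed++;		/* teardown path frees it anyway */
	}
	return freed;
}

int main(void)
{
	printf("irq path frees %d, teardown frees %d\n",
	       tx_free(true), tx_free(false));
	return 0;
}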
@@ -1371,6 +1371,13 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
 		free_cpumask_var(thread_mask);
 	}
 
+	if (count > EFX_MAX_RX_QUEUES) {
+		netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
+			       "Reducing number of rx queues from %u to %u.\n",
+			       count, EFX_MAX_RX_QUEUES);
+		count = EFX_MAX_RX_QUEUES;
+	}
+
 	/* If RSS is requested for the PF *and* VFs then we can't write RSS
 	 * table entries that are inaccessible to VFs
 	 */
@@ -1354,6 +1354,13 @@ static unsigned int ef4_wanted_parallelism(struct ef4_nic *efx)
 		free_cpumask_var(thread_mask);
 	}
 
+	if (count > EF4_MAX_RX_QUEUES) {
+		netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
+			       "Reducing number of rx queues from %u to %u.\n",
+			       count, EF4_MAX_RX_QUEUES);
+		count = EF4_MAX_RX_QUEUES;
+	}
+
 	return count;
 }
 
@@ -797,9 +797,6 @@ static struct phy_driver ksphy_driver[] = {
 	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
-	.get_sset_count = kszphy_get_sset_count,
-	.get_strings	= kszphy_get_strings,
-	.get_stats	= kszphy_get_stats,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
 }, {
@@ -939,9 +936,6 @@ static struct phy_driver ksphy_driver[] = {
 	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
-	.get_sset_count = kszphy_get_sset_count,
-	.get_strings	= kszphy_get_strings,
-	.get_stats	= kszphy_get_stats,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
 }, {
@@ -951,6 +945,7 @@ static struct phy_driver ksphy_driver[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
 	.driver_data	= &ksz9021_type,
+	.probe		= kszphy_probe,
 	.config_init	= ksz9021_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
@@ -970,6 +965,7 @@ static struct phy_driver ksphy_driver[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
 	.driver_data	= &ksz9021_type,
+	.probe		= kszphy_probe,
 	.config_init	= ksz9031_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= ksz9031_read_status,
@@ -988,9 +984,6 @@ static struct phy_driver ksphy_driver[] = {
 	.config_init	= kszphy_config_init,
 	.config_aneg	= ksz8873mll_config_aneg,
 	.read_status	= ksz8873mll_read_status,
-	.get_sset_count = kszphy_get_sset_count,
-	.get_strings	= kszphy_get_strings,
-	.get_stats	= kszphy_get_stats,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
 }, {
@@ -1002,9 +995,6 @@ static struct phy_driver ksphy_driver[] = {
 	.config_init	= kszphy_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
-	.get_sset_count = kszphy_get_sset_count,
-	.get_strings	= kszphy_get_strings,
-	.get_stats	= kszphy_get_stats,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
 }, {
@@ -1016,9 +1006,6 @@ static struct phy_driver ksphy_driver[] = {
 	.config_init	= kszphy_config_init,
 	.config_aneg	= ksz8873mll_config_aneg,
 	.read_status	= ksz8873mll_read_status,
-	.get_sset_count = kszphy_get_sset_count,
-	.get_strings	= kszphy_get_strings,
-	.get_stats	= kszphy_get_stats,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
 } };
@@ -1264,7 +1264,7 @@ static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
 		goto nla_put_failure;
 
 	/* rule only needs to appear once */
-	nlh->nlmsg_flags &= NLM_F_EXCL;
+	nlh->nlmsg_flags |= NLM_F_EXCL;
 
 	frh = nlmsg_data(nlh);
 	memset(frh, 0, sizeof(*frh));
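The vrf fix is one character but worth spelling out: `&=` with a single flag masks away every other bit already set in nlmsg_flags, while `|=` adds NLM_F_EXCL on top of them. A runnable demonstration using the flag values from include/uapi/linux/netlink.h:

#include <stdio.h>

#define NLM_F_REQUEST	0x001
#define NLM_F_ACK	0x004
#define NLM_F_EXCL	0x200

int main(void)
{
	unsigned short wrong = NLM_F_REQUEST | NLM_F_ACK;
	unsigned short right = NLM_F_REQUEST | NLM_F_ACK;

	wrong &= NLM_F_EXCL;	/* clears REQUEST and ACK: 0x000 */
	right |= NLM_F_EXCL;	/* keeps them:            0x205 */

	printf("&= gives 0x%03x, |= gives 0x%03x\n", wrong, right);
	return 0;
}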
@@ -934,8 +934,14 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
 	rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, NULL);
 	if (rc < 0)
 		goto out_unlock;
+	nvdimm_bus_unlock(&nvdimm_bus->dev);
+
 	if (copy_to_user(p, buf, buf_len))
 		rc = -EFAULT;
+
+	vfree(buf);
+	return rc;
+
  out_unlock:
 	nvdimm_bus_unlock(&nvdimm_bus->dev);
  out:
@@ -243,7 +243,15 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
 	}
 
 	if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
-		if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)) {
+		/*
+		 * FIXME: nsio_rw_bytes() may be called from atomic
+		 * context in the btt case and nvdimm_clear_poison()
+		 * takes a sleeping lock. Until the locking can be
+		 * reworked this capability requires that the namespace
+		 * is not claimed by btt.
+		 */
+		if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
+				&& (!ndns->claim || !is_nd_btt(ndns->claim))) {
 			long cleared;
 
 			cleared = nvdimm_clear_poison(&ndns->dev, offset, size);
@@ -395,7 +395,7 @@ EXPORT_SYMBOL_GPL(nvdimm_create);
 
 int alias_dpa_busy(struct device *dev, void *data)
 {
-	resource_size_t map_end, blk_start, new, busy;
+	resource_size_t map_end, blk_start, new;
 	struct blk_alloc_info *info = data;
 	struct nd_mapping *nd_mapping;
 	struct nd_region *nd_region;
@@ -436,29 +436,19 @@ int alias_dpa_busy(struct device *dev, void *data)
  retry:
 	/*
 	 * Find the free dpa from the end of the last pmem allocation to
-	 * the end of the interleave-set mapping that is not already
-	 * covered by a blk allocation.
+	 * the end of the interleave-set mapping.
 	 */
-	busy = 0;
 	for_each_dpa_resource(ndd, res) {
+		if (strncmp(res->name, "pmem", 4) != 0)
+			continue;
 		if ((res->start >= blk_start && res->start < map_end)
 				|| (res->end >= blk_start
 					&& res->end <= map_end)) {
-			if (strncmp(res->name, "pmem", 4) == 0) {
-				new = max(blk_start, min(map_end + 1,
-							res->end + 1));
-				if (new != blk_start) {
-					blk_start = new;
-					goto retry;
-				}
-			} else
-				busy += min(map_end, res->end)
-					- max(nd_mapping->start, res->start) + 1;
-		} else if (nd_mapping->start > res->start
-				&& map_end < res->end) {
-			/* total eclipse of the PMEM region mapping */
-			busy += nd_mapping->size;
-			break;
+			new = max(blk_start, min(map_end + 1, res->end + 1));
+			if (new != blk_start) {
+				blk_start = new;
+				goto retry;
+			}
 		}
 	}
 
@@ -470,52 +460,11 @@ int alias_dpa_busy(struct device *dev, void *data)
 		return 1;
 	}
 
-	info->available -= blk_start - nd_mapping->start + busy;
+	info->available -= blk_start - nd_mapping->start;
 
 	return 0;
 }
 
-static int blk_dpa_busy(struct device *dev, void *data)
-{
-	struct blk_alloc_info *info = data;
-	struct nd_mapping *nd_mapping;
-	struct nd_region *nd_region;
-	resource_size_t map_end;
-	int i;
-
-	if (!is_nd_pmem(dev))
-		return 0;
-
-	nd_region = to_nd_region(dev);
-	for (i = 0; i < nd_region->ndr_mappings; i++) {
-		nd_mapping = &nd_region->mapping[i];
-		if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
-			break;
-	}
-
-	if (i >= nd_region->ndr_mappings)
-		return 0;
-
-	map_end = nd_mapping->start + nd_mapping->size - 1;
-	if (info->res->start >= nd_mapping->start
-			&& info->res->start < map_end) {
-		if (info->res->end <= map_end) {
-			info->busy = 0;
-			return 1;
-		} else {
-			info->busy -= info->res->end - map_end;
-			return 0;
-		}
-	} else if (info->res->end >= nd_mapping->start
-			&& info->res->end <= map_end) {
-		info->busy -= nd_mapping->start - info->res->start;
-		return 0;
-	} else {
-		info->busy -= nd_mapping->size;
-		return 0;
-	}
-}
-
 /**
  * nd_blk_available_dpa - account the unused dpa of BLK region
  * @nd_mapping: container of dpa-resource-root + labels
@@ -545,11 +494,7 @@ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
 	for_each_dpa_resource(ndd, res) {
 		if (strncmp(res->name, "blk", 3) != 0)
 			continue;
-		info.res = res;
-		info.busy = resource_size(res);
-		device_for_each_child(&nvdimm_bus->dev, &info, blk_dpa_busy);
-		info.available -= info.busy;
+		info.available -= resource_size(res);
 	}
 
 	return info.available;
@@ -2023,7 +2023,7 @@ nvme_fc_configure_admin_queue(struct nvme_fc_ctrl *ctrl)
 	}
 
 	ctrl->ctrl.sqsize =
-		min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+		min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
 
 	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
 	if (error)
@@ -1606,7 +1606,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
 	}
 
 	ctrl->ctrl.sqsize =
-		min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+		min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
 
 	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
 	if (error)
@@ -392,7 +392,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 	}
 
 	ctrl->ctrl.sqsize =
-		min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+		min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
 
 	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
 	if (error)
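All three NVMe transports get the same off-by-one fix: both ctrl->ctrl.sqsize and the controller's CAP.MQES field are zero-based queue depths, so clamping against MQES + 1 could allow a submission queue one entry deeper than the controller advertises. Worked arithmetic with made-up example values:

#include <stdio.h>

int main(void)
{
	int mqes = 31;		/* CAP.MQES: 32-entry queues, zero-based */
	int sqsize = 127;	/* requested depth, zero-based */

	int old_clamp = sqsize < mqes + 1 ? sqsize : mqes + 1;
	int new_clamp = sqsize < mqes ? sqsize : mqes;

	/* a zero-based value N means N + 1 entries */
	printf("old: sqsize=%d (%d entries, 1 too many)\n",
	       old_clamp, old_clamp + 1);
	printf("new: sqsize=%d (%d entries, as advertised)\n",
	       new_clamp, new_clamp + 1);
	return 0;
}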
@@ -275,7 +275,7 @@ int reset_control_status(struct reset_control *rstc)
 }
 EXPORT_SYMBOL_GPL(reset_control_status);
 
-static struct reset_control *__reset_control_get(
+static struct reset_control *__reset_control_get_internal(
 				struct reset_controller_dev *rcdev,
 				unsigned int index, bool shared)
 {
@@ -308,7 +308,7 @@ static struct reset_control *__reset_control_get(
 	return rstc;
 }
 
-static void __reset_control_put(struct reset_control *rstc)
+static void __reset_control_put_internal(struct reset_control *rstc)
 {
 	lockdep_assert_held(&reset_list_mutex);
 
@@ -377,7 +377,7 @@ struct reset_control *__of_reset_control_get(struct device_node *node,
 	}
 
 	/* reset_list_mutex also protects the rcdev's reset_control list */
-	rstc = __reset_control_get(rcdev, rstc_id, shared);
+	rstc = __reset_control_get_internal(rcdev, rstc_id, shared);
 
 	mutex_unlock(&reset_list_mutex);
 
@@ -385,6 +385,17 @@ struct reset_control *__of_reset_control_get(struct device_node *node,
 }
 EXPORT_SYMBOL_GPL(__of_reset_control_get);
 
+struct reset_control *__reset_control_get(struct device *dev, const char *id,
+					  int index, bool shared, bool optional)
+{
+	if (dev->of_node)
+		return __of_reset_control_get(dev->of_node, id, index, shared,
+					      optional);
+
+	return optional ? NULL : ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(__reset_control_get);
+
 /**
  * reset_control_put - free the reset controller
  * @rstc: reset controller
@@ -396,7 +407,7 @@ void reset_control_put(struct reset_control *rstc)
 		return;
 
 	mutex_lock(&reset_list_mutex);
-	__reset_control_put(rstc);
+	__reset_control_put_internal(rstc);
 	mutex_unlock(&reset_list_mutex);
 }
 EXPORT_SYMBOL_GPL(reset_control_put);
@@ -417,8 +428,7 @@ struct reset_control *__devm_reset_control_get(struct device *dev,
 	if (!ptr)
 		return ERR_PTR(-ENOMEM);
 
-	rstc = __of_reset_control_get(dev ? dev->of_node : NULL,
-				      id, index, shared, optional);
+	rstc = __reset_control_get(dev, id, index, shared, optional);
 	if (!IS_ERR(rstc)) {
 		*ptr = rstc;
 		devres_add(dev, ptr);
@@ -1690,9 +1690,6 @@ struct aac_dev
 #define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \
 	(dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4)
 
-#define aac_adapter_check_health(dev) \
-	(dev)->a_ops.adapter_check_health(dev)
-
 #define aac_adapter_restart(dev, bled, reset_type) \
 	((dev)->a_ops.adapter_restart(dev, bled, reset_type))
 
@@ -2615,6 +2612,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
 	return capacity;
 }
 
+static inline int aac_adapter_check_health(struct aac_dev *dev)
+{
+	if (unlikely(pci_channel_offline(dev->pdev)))
+		return -1;
+
+	return (dev)->a_ops.adapter_check_health(dev);
+}
+
 /* SCp.phase values */
 #define AAC_OWNER_MIDLEVEL	0x101
 #define AAC_OWNER_LOWLEVEL	0x102
@@ -1873,7 +1873,8 @@ int aac_check_health(struct aac_dev * aac)
 	spin_unlock_irqrestore(&aac->fib_lock, flagv);
 
 	if (BlinkLED < 0) {
-		printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED);
+		printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n",
+		       aac->name, BlinkLED);
 		goto out;
 	}
 
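Turning aac_adapter_check_health() from a macro into an inline lets it short-circuit before touching the adapter when the PCI channel is offline, which is what the reworded "dead (or got a PCI error)" message reflects. A runnable mock of the guard ordering; the offline flag and firmware query are stand-ins, not the driver's code:

#include <stdbool.h>
#include <stdio.h>

static bool channel_offline = true;	/* pretend EEH/AER tripped */

static int firmware_health(void) { return 0; /* "healthy" */ }

static int check_health(void)
{
	if (channel_offline)
		return -1;		/* never query a dead channel */
	return firmware_health();
}

int main(void)
{
	printf("health while offline: %d\n", check_health());
	return 0;
}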
@@ -6293,7 +6293,12 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
 		break;
 	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
 	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
-		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
+		/*
+		 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
+		 * so SCSI mid-layer and upper layers handle it accordingly.
+		 */
+		if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
+			scsi_cmd->result |= (DID_PASSTHROUGH << 16);
 		break;
 	case IPR_IOASC_BUS_WAS_RESET:
 	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
@@ -99,7 +99,8 @@ static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf,
 		qedf_set_vlan_id(qedf, vid);
 
 		/* Inform waiter that it's ok to call fcoe_ctlr_link up() */
-		complete(&qedf->fipvlan_compl);
+		if (!completion_done(&qedf->fipvlan_compl))
+			complete(&qedf->fipvlan_compl);
 	}
 }
 
@@ -2803,6 +2803,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 	atomic_set(&qedf->num_offloads, 0);
 	qedf->stop_io_on_error = false;
 	pci_set_drvdata(pdev, qedf);
+	init_completion(&qedf->fipvlan_compl);
 
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
 		  "QLogic FastLinQ FCoE Module qedf %s, "
@@ -1160,8 +1160,13 @@ static inline
 uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
 {
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
 
-	return ((RD_REG_DWORD(&reg->host_status)) == ISP_REG_DISCONNECT);
+	if (IS_P3P_TYPE(ha))
+		return ((RD_REG_DWORD(&reg82->host_int)) == ISP_REG_DISCONNECT);
+	else
+		return ((RD_REG_DWORD(&reg->host_status)) ==
+			ISP_REG_DISCONNECT);
 }
 
 /**************************************************************************
@@ -2102,6 +2102,22 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
 
 #define READ_CAPACITY_RETRIES_ON_RESET	10
 
+/*
+ * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set
+ * and the reported logical block size is bigger than 512 bytes. Note
+ * that last_sector is a u64 and therefore logical_to_sectors() is not
+ * applicable.
+ */
+static bool sd_addressable_capacity(u64 lba, unsigned int sector_size)
+{
+	u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);
+
+	if (sizeof(sector_t) == 4 && last_sector > U32_MAX)
+		return false;
+
+	return true;
+}
+
 static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
 						unsigned char *buffer)
 {
@@ -2167,7 +2183,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
 		return -ENODEV;
 	}
 
-	if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
+	if (!sd_addressable_capacity(lba, sector_size)) {
 		sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
 			"kernel compiled with support for large block "
 			"devices.\n");
@@ -2256,7 +2272,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
 		return sector_size;
 	}
 
-	if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
+	if (!sd_addressable_capacity(lba, sector_size)) {
 		sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
 			"kernel compiled with support for large block "
 			"devices.\n");
@@ -2956,7 +2972,8 @@ static int sd_revalidate_disk(struct gendisk *disk)
 		q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
 		rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
 	} else
-		rw_max = BLK_DEF_MAX_SECTORS;
+		rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
+				      (sector_t)BLK_DEF_MAX_SECTORS);
 
 	/* Combine with controller limits */
 	q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
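A worked example of the new sd_addressable_capacity() check: with 4096-byte logical blocks, ilog2(4096) - 9 = 3, so each block covers eight 512-byte sectors and the last sector is (lba + 1) << 3. A device reporting lba 0x1fffffff (2 TiB) then needs sector 0x100000000, which overflows a 4-byte sector_t but is fine under CONFIG_LBDAF. A runnable userspace version of the same computation:

#include <stdint.h>
#include <stdio.h>

static int addressable(uint64_t lba, unsigned int sector_size,
		       size_t sizeof_sector_t)
{
	unsigned int shift = 0;
	uint64_t last_sector;

	while ((1u << (shift + 1)) <= sector_size)
		shift++;			/* ilog2(sector_size) */
	last_sector = (lba + 1ULL) << (shift - 9);

	return !(sizeof_sector_t == 4 && last_sector > UINT32_MAX);
}

int main(void)
{
	/* 2 TiB of 4 KiB blocks: (0x1fffffff + 1) << 3 == 2^32 */
	printf("32-bit sector_t: %d, 64-bit sector_t: %d\n",
	       addressable(0x1fffffffULL, 4096, 4),
	       addressable(0x1fffffffULL, 4096, 8));
	return 0;
}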
@@ -836,6 +836,7 @@ static void get_capabilities(struct scsi_cd *cd)
 	unsigned char *buffer;
 	struct scsi_mode_data data;
 	struct scsi_sense_hdr sshdr;
+	unsigned int ms_len = 128;
 	int rc, n;
 
 	static const char *loadmech[] =
@@ -862,10 +863,11 @@ static void get_capabilities(struct scsi_cd *cd)
 	scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
 
 	/* ask for mode page 0x2a */
-	rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128,
+	rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len,
 			     SR_TIMEOUT, 3, &data, NULL);
 
-	if (!scsi_status_is_good(rc)) {
+	if (!scsi_status_is_good(rc) || data.length > ms_len ||
+	    data.header_length + data.block_descriptor_length > data.length) {
 		/* failed, drive doesn't have capabilities mode page */
 		cd->cdi.speed = 1;
 		cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
@@ -491,6 +491,41 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
 	tty_ldisc_debug(tty, "%p: closed\n", ld);
 }
 
+/**
+ *	tty_ldisc_restore	-	helper for tty ldisc change
+ *	@tty: tty to recover
+ *	@old: previous ldisc
+ *
+ *	Restore the previous line discipline or N_TTY when a line discipline
+ *	change fails due to an open error
+ */
+
+static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
+{
+	struct tty_ldisc *new_ldisc;
+	int r;
+
+	/* There is an outstanding reference here so this is safe */
+	old = tty_ldisc_get(tty, old->ops->num);
+	WARN_ON(IS_ERR(old));
+	tty->ldisc = old;
+	tty_set_termios_ldisc(tty, old->ops->num);
+	if (tty_ldisc_open(tty, old) < 0) {
+		tty_ldisc_put(old);
+		/* This driver is always present */
+		new_ldisc = tty_ldisc_get(tty, N_TTY);
+		if (IS_ERR(new_ldisc))
+			panic("n_tty: get");
+		tty->ldisc = new_ldisc;
+		tty_set_termios_ldisc(tty, N_TTY);
+		r = tty_ldisc_open(tty, new_ldisc);
+		if (r < 0)
+			panic("Couldn't open N_TTY ldisc for "
+			      "%s --- error %d.",
+			      tty_name(tty), r);
+	}
+}
+
 /**
  *	tty_set_ldisc		-	set line discipline
  *	@tty: the terminal to set
@@ -504,7 +539,12 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
 
 int tty_set_ldisc(struct tty_struct *tty, int disc)
 {
-	int retval, old_disc;
+	int retval;
+	struct tty_ldisc *old_ldisc, *new_ldisc;
+
+	new_ldisc = tty_ldisc_get(tty, disc);
+	if (IS_ERR(new_ldisc))
+		return PTR_ERR(new_ldisc);
 
 	tty_lock(tty);
 	retval = tty_ldisc_lock(tty, 5 * HZ);
@@ -517,8 +557,7 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
 	}
 
 	/* Check the no-op case */
-	old_disc = tty->ldisc->ops->num;
-	if (old_disc == disc)
+	if (tty->ldisc->ops->num == disc)
 		goto out;
 
 	if (test_bit(TTY_HUPPED, &tty->flags)) {
@@ -527,25 +566,34 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
 		goto out;
 	}
 
-	retval = tty_ldisc_reinit(tty, disc);
+	old_ldisc = tty->ldisc;
+
+	/* Shutdown the old discipline. */
+	tty_ldisc_close(tty, old_ldisc);
+
+	/* Now set up the new line discipline. */
+	tty->ldisc = new_ldisc;
+	tty_set_termios_ldisc(tty, disc);
+
+	retval = tty_ldisc_open(tty, new_ldisc);
 	if (retval < 0) {
 		/* Back to the old one or N_TTY if we can't */
-		if (tty_ldisc_reinit(tty, old_disc) < 0) {
-			pr_err("tty: TIOCSETD failed, reinitializing N_TTY\n");
-			if (tty_ldisc_reinit(tty, N_TTY) < 0) {
-				/* At this point we have tty->ldisc == NULL. */
-				pr_err("tty: reinitializing N_TTY failed\n");
-			}
-		}
+		tty_ldisc_put(new_ldisc);
+		tty_ldisc_restore(tty, old_ldisc);
 	}
 
-	if (tty->ldisc && tty->ldisc->ops->num != old_disc &&
-	    tty->ops->set_ldisc) {
+	if (tty->ldisc->ops->num != old_ldisc->ops->num && tty->ops->set_ldisc) {
 		down_read(&tty->termios_rwsem);
 		tty->ops->set_ldisc(tty);
 		up_read(&tty->termios_rwsem);
 	}
 
+	/* At this point we hold a reference to the new ldisc and a
+	   reference to the old ldisc, or we hold two references to
+	   the old ldisc (if it was restored as part of error cleanup
+	   above). In either case, releasing a single reference from
+	   the old ldisc is correct. */
+	new_ldisc = old_ldisc;
 out:
 	tty_ldisc_unlock(tty);
 
@@ -553,6 +601,7 @@ out:
 	   already running */
 	tty_buffer_restart_work(tty->port);
 err:
+	tty_ldisc_put(new_ldisc);	/* drop the extra reference */
 	tty_unlock(tty);
 	return retval;
 }
@@ -613,8 +662,10 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
 	int retval;
 
 	ld = tty_ldisc_get(tty, disc);
-	if (IS_ERR(ld))
+	if (IS_ERR(ld)) {
+		BUG_ON(disc == N_TTY);
 		return PTR_ERR(ld);
+	}
 
 	if (tty->ldisc) {
 		tty_ldisc_close(tty, tty->ldisc);
@@ -626,8 +677,10 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
 	tty_set_termios_ldisc(tty, disc);
 	retval = tty_ldisc_open(tty, tty->ldisc);
 	if (retval) {
-		tty_ldisc_put(tty->ldisc);
-		tty->ldisc = NULL;
+		if (!WARN_ON(disc == N_TTY)) {
+			tty_ldisc_put(tty->ldisc);
+			tty->ldisc = NULL;
+		}
 	}
 	return retval;
 }
@@ -2145,6 +2145,9 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
 	int retval = 0;
 	const char *s = nd->name->name;
 
+	if (!*s)
+		flags &= ~LOOKUP_RCU;
+
 	nd->last_type = LAST_ROOT; /* if there are only slashes... */
 	nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
 	nd->depth = 0;
@@ -208,14 +208,19 @@ restart:
 				continue;
 			/*
 			 * Skip ops whose filesystem we don't know about unless
-			 * it is being mounted.
+			 * it is being mounted or unmounted. It is possible for
+			 * a filesystem we don't know about to be unmounted if
+			 * it fails to mount in the kernel after userspace has
+			 * been sent the mount request.
 			 */
 			/* XXX: is there a better way to detect this? */
 			} else if (ret == -1 &&
 				   !(op->upcall.type ==
 				     ORANGEFS_VFS_OP_FS_MOUNT ||
 				     op->upcall.type ==
-				     ORANGEFS_VFS_OP_GETATTR)) {
+				     ORANGEFS_VFS_OP_GETATTR ||
+				     op->upcall.type ==
+				     ORANGEFS_VFS_OP_FS_UMOUNT)) {
 				gossip_debug(GOSSIP_DEV_DEBUG,
 					     "orangefs: skipping op tag %llu %s\n",
 					     llu(op->tag), get_opname_string(op));
@@ -249,6 +249,7 @@ struct orangefs_sb_info_s {
 	char devname[ORANGEFS_MAX_SERVER_ADDR_LEN];
 	struct super_block *sb;
 	int mount_pending;
+	int no_list;
 	struct list_head list;
 };
 
@@ -493,7 +493,7 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
 
 	if (ret) {
 		d = ERR_PTR(ret);
-		goto free_op;
+		goto free_sb_and_op;
 	}
 
 	/*
@@ -519,6 +519,9 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
 	spin_unlock(&orangefs_superblocks_lock);
 	op_release(new_op);
 
+	/* Must be removed from the list now. */
+	ORANGEFS_SB(sb)->no_list = 0;
+
 	if (orangefs_userspace_version >= 20906) {
 		new_op = op_alloc(ORANGEFS_VFS_OP_FEATURES);
 		if (!new_op)
@@ -533,6 +536,10 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
 
 	return dget(sb->s_root);
 
+free_sb_and_op:
+	/* Will call orangefs_kill_sb with sb not in list. */
+	ORANGEFS_SB(sb)->no_list = 1;
+	deactivate_locked_super(sb);
 free_op:
 	gossip_err("orangefs_mount: mount request failed with %d\n", ret);
 	if (ret == -EINVAL) {
@@ -558,12 +565,14 @@ void orangefs_kill_sb(struct super_block *sb)
 	 */
 	orangefs_unmount_sb(sb);
 
-	/* remove the sb from our list of orangefs specific sb's */
-
-	spin_lock(&orangefs_superblocks_lock);
-	__list_del_entry(&ORANGEFS_SB(sb)->list);	/* not list_del_init */
-	ORANGEFS_SB(sb)->list.prev = NULL;
-	spin_unlock(&orangefs_superblocks_lock);
+	if (!ORANGEFS_SB(sb)->no_list) {
+		/* remove the sb from our list of orangefs specific sb's */
+		spin_lock(&orangefs_superblocks_lock);
+		/* not list_del_init */
+		__list_del_entry(&ORANGEFS_SB(sb)->list);
+		ORANGEFS_SB(sb)->list.prev = NULL;
+		spin_unlock(&orangefs_superblocks_lock);
+	}
 
 	/*
 	 * make sure that ORANGEFS_DEV_REMOUNT_ALL loop that might've seen us
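The orangefs no_list flag exists because a superblock can now reach orangefs_kill_sb() without ever having been added to orangefs_superblocks (the new free_sb_and_op error path), so kill_sb must skip the list removal for it. A runnable mock of the guarded-unlink pattern; the structure and field names here are invented:

#include <stdbool.h>
#include <stdio.h>

struct sb_info { bool no_list; bool on_list; };

static void kill_sb(struct sb_info *s)
{
	if (!s->no_list) {		/* only unlink what was linked */
		s->on_list = false;
		puts("removed from list");
	} else {
		puts("never listed; skipping removal");
	}
}

int main(void)
{
	struct sb_info failed_mount = { .no_list = true,  .on_list = false };
	struct sb_info good_mount   = { .no_list = false, .on_list = true  };

	kill_sb(&failed_mount);
	kill_sb(&good_mount);
	return 0;
}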
@@ -166,6 +166,16 @@ static inline struct ahash_instance *ahash_alloc_instance(
 	return crypto_alloc_instance2(name, alg, ahash_instance_headroom());
 }
 
+static inline void ahash_request_complete(struct ahash_request *req, int err)
+{
+	req->base.complete(&req->base, err);
+}
+
+static inline u32 ahash_request_flags(struct ahash_request *req)
+{
+	return req->base.flags;
+}
+
 static inline struct crypto_ahash *crypto_spawn_ahash(
 	struct crypto_ahash_spawn *spawn)
 {
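The two new ahash inlines give driver code a sanctioned way to finish a request and to inspect its flags without poking at req->base directly. A sketch of how a driver's completion path might use them; the mydrv_* names are hypothetical, only ahash_request_complete(), ahash_request_flags() and CRYPTO_TFM_REQ_MAY_BACKLOG are real:

/* hypothetical completion handler, e.g. run from a driver's done IRQ */
static void mydrv_hash_done(struct mydrv_device *dd, int err)
{
	struct ahash_request *req = dd->cur_req;	/* hypothetical field */

	/* the request flags say whether backlogged requests may follow */
	if (ahash_request_flags(req) & CRYPTO_TFM_REQ_MAY_BACKLOG)
		mydrv_kick_backlog(dd);		/* hypothetical helper */

	ahash_request_complete(req, err);	/* calls req->base.complete() */
}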
@@ -1672,12 +1672,36 @@ static inline bool bios_segs_mergeable(struct request_queue *q,
 	return true;
 }
 
-static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
-			 struct bio *next)
+static inline bool bio_will_gap(struct request_queue *q,
+				struct request *prev_rq,
+				struct bio *prev,
+				struct bio *next)
 {
 	if (bio_has_data(prev) && queue_virt_boundary(q)) {
 		struct bio_vec pb, nb;
 
+		/*
+		 * don't merge if the 1st bio starts with non-zero
+		 * offset, otherwise it is quite difficult to respect
+		 * sg gap limit. We work hard to merge a huge number of small
+		 * single bios in case of mkfs.
+		 */
+		if (prev_rq)
+			bio_get_first_bvec(prev_rq->bio, &pb);
+		else
+			bio_get_first_bvec(prev, &pb);
+		if (pb.bv_offset)
+			return true;
+
+		/*
+		 * We don't need to worry about the situation that the
+		 * merged segment ends in unaligned virt boundary:
+		 *
+		 * - if 'pb' ends aligned, the merged segment ends aligned
+		 * - if 'pb' ends unaligned, the next bio must include
+		 *   one single bvec of 'nb', otherwise the 'nb' can't
+		 *   merge with 'pb'
+		 */
 		bio_get_last_bvec(prev, &pb);
 		bio_get_first_bvec(next, &nb);
 
@@ -1690,12 +1714,12 @@ static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
 
 static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
 {
-	return bio_will_gap(req->q, req->biotail, bio);
+	return bio_will_gap(req->q, req, req->biotail, bio);
 }
 
 static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
 {
-	return bio_will_gap(req->q, bio, req->bio);
+	return bio_will_gap(req->q, NULL, bio, req->bio);
 }
 
 int kblockd_schedule_work(struct work_struct *work);
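A worked example of the virt-boundary rule the new comments describe: the block layer's gap test ORs the end offset of the previous segment with the start offset of the next and masks with queue_virt_boundary(); any non-zero result means the two cannot share a scatterlist element, and the new code additionally refuses the merge when the request's very first bvec starts at a non-zero offset. Runnable arithmetic, with a 4 KiB boundary mask as the example:

#include <stdio.h>

/* the OR-and-mask test used by the block layer's gap check */
static int gap(unsigned long prev_end, unsigned long next_start,
	       unsigned long mask)
{
	return ((prev_end | next_start) & mask) != 0;
}

int main(void)
{
	unsigned long mask = 4096 - 1;

	/* prev ends on a page boundary, next starts on one: mergeable */
	printf("aligned:   gap=%d\n", gap(0x2000, 0x5000, mask));
	/* prev ends mid-page: a hole would appear in the scatterlist */
	printf("unaligned: gap=%d\n", gap(0x2600, 0x5000, mask));
	return 0;
}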
@@ -15,6 +15,9 @@ int reset_control_status(struct reset_control *rstc);
 struct reset_control *__of_reset_control_get(struct device_node *node,
 				     const char *id, int index, bool shared,
 				     bool optional);
+struct reset_control *__reset_control_get(struct device *dev, const char *id,
+					  int index, bool shared,
+					  bool optional);
 void reset_control_put(struct reset_control *rstc);
 struct reset_control *__devm_reset_control_get(struct device *dev,
 				     const char *id, int index, bool shared,
@@ -72,6 +75,13 @@ static inline struct reset_control *__of_reset_control_get(
 	return optional ? NULL : ERR_PTR(-ENOTSUPP);
 }
 
+static inline struct reset_control *__reset_control_get(
+					struct device *dev, const char *id,
+					int index, bool shared, bool optional)
+{
+	return optional ? NULL : ERR_PTR(-ENOTSUPP);
+}
+
 static inline struct reset_control *__devm_reset_control_get(
 					struct device *dev, const char *id,
 					int index, bool shared, bool optional)
@@ -102,8 +112,7 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id)
 #ifndef CONFIG_RESET_CONTROLLER
 	WARN_ON(1);
 #endif
-	return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false,
-				      false);
+	return __reset_control_get(dev, id, 0, false, false);
 }
 
 /**
@@ -131,22 +140,19 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id)
 static inline struct reset_control *reset_control_get_shared(
 					struct device *dev, const char *id)
 {
-	return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true,
-				      false);
+	return __reset_control_get(dev, id, 0, true, false);
 }
 
 static inline struct reset_control *reset_control_get_optional_exclusive(
 					struct device *dev, const char *id)
 {
-	return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false,
-				      true);
+	return __reset_control_get(dev, id, 0, false, true);
 }
 
 static inline struct reset_control *reset_control_get_optional_shared(
 					struct device *dev, const char *id)
 {
-	return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true,
-				      true);
+	return __reset_control_get(dev, id, 0, true, true);
 }
 
 /**
@@ -3349,6 +3349,14 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 		if (insn->imm == BPF_FUNC_xdp_adjust_head)
 			prog->xdp_adjust_head = 1;
 		if (insn->imm == BPF_FUNC_tail_call) {
+			/* If we tail call into other programs, we
+			 * cannot make any assumptions since they can
+			 * be replaced dynamically during runtime in
+			 * the program array.
+			 */
+			prog->cb_access = 1;
+			prog->xdp_adjust_head = 1;
+
 			/* mark bpf_tail_call as different opcode to avoid
 			 * conditional branch in the interpeter for every normal
 			 * call and to prevent accidental JITing by JIT compiler
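Why the verifier must be conservative at a tail call: the callee lives in a BPF_MAP_TYPE_PROG_ARRAY slot that userspace can rewrite at any time with bpf_map_update_elem(), so nothing about the eventual callee (cb usage, XDP head adjustment) is knowable at verification time. A sketch in the samples/bpf style of this era; the map layout and section names are illustrative only:

struct bpf_map_def SEC("maps") jmp_table = {
	.type = BPF_MAP_TYPE_PROG_ARRAY,
	.key_size = sizeof(u32),
	.value_size = sizeof(u32),
	.max_entries = 4,
};

SEC("xdp")
int dispatch(struct xdp_md *ctx)
{
	/* may jump into *any* program userspace installs in slot 0 */
	bpf_tail_call(ctx, &jmp_table, 0);

	/* falls through only when the slot is empty */
	return XDP_PASS;
}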
@@ -1146,7 +1146,7 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
 		 * path is super cold. Let's just sleep a bit and retry.
 		 */
 		pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
-		if (IS_ERR_OR_NULL(pinned_sb) ||
+		if (IS_ERR(pinned_sb) ||
 		    !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
 			mutex_unlock(&cgroup_mutex);
 			if (!IS_ERR_OR_NULL(pinned_sb))
@@ -46,13 +46,13 @@ enum {
 	(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
 
 /*
- * CONFIG_PROVE_LOCKING_SMALL is defined for sparc. Sparc requires .text,
+ * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
  * .data and .bss to fit in required 32MB limit for the kernel. With
- * PROVE_LOCKING we could go over this limit and cause system boot-up problems.
+ * CONFIG_LOCKDEP we could go over this limit and cause system boot-up problems.
  * So, reduce the static allocations for lockdeps related structures so that
  * everything fits in current required size limit.
 */
-#ifdef CONFIG_PROVE_LOCKING_SMALL
+#ifdef CONFIG_LOCKDEP_SMALL
 /*
  * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
  * we track.
@ -3755,23 +3755,24 @@ static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
|
|||||||
ftrace_probe_registered = 1;
|
ftrace_probe_registered = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __disable_ftrace_function_probe(void)
|
static bool __disable_ftrace_function_probe(void)
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
if (!ftrace_probe_registered)
|
if (!ftrace_probe_registered)
|
||||||
return;
|
return false;
|
||||||
|
|
||||||
for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
|
for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
|
||||||
struct hlist_head *hhd = &ftrace_func_hash[i];
|
struct hlist_head *hhd = &ftrace_func_hash[i];
|
||||||
if (hhd->first)
|
if (hhd->first)
|
||||||
return;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* no more funcs left */
|
/* no more funcs left */
|
||||||
ftrace_shutdown(&trace_probe_ops, 0);
|
ftrace_shutdown(&trace_probe_ops, 0);
|
||||||
|
|
||||||
ftrace_probe_registered = 0;
|
ftrace_probe_registered = 0;
|
||||||
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -3901,6 +3902,7 @@ static void
|
|||||||
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
|
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
|
||||||
void *data, int flags)
|
void *data, int flags)
|
||||||
{
|
{
|
||||||
|
struct ftrace_ops_hash old_hash_ops;
|
||||||
struct ftrace_func_entry *rec_entry;
|
struct ftrace_func_entry *rec_entry;
|
||||||
struct ftrace_func_probe *entry;
|
struct ftrace_func_probe *entry;
|
||||||
struct ftrace_func_probe *p;
|
struct ftrace_func_probe *p;
|
||||||
@ -3912,6 +3914,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
|
|||||||
struct hlist_node *tmp;
|
struct hlist_node *tmp;
|
||||||
char str[KSYM_SYMBOL_LEN];
|
char str[KSYM_SYMBOL_LEN];
|
||||||
int i, ret;
|
int i, ret;
|
||||||
|
bool disabled;
|
||||||
|
|
||||||
if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
|
if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
|
||||||
func_g.search = NULL;
|
func_g.search = NULL;
|
||||||
@ -3930,6 +3933,10 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
|
|||||||
|
|
||||||
mutex_lock(&trace_probe_ops.func_hash->regex_lock);
|
mutex_lock(&trace_probe_ops.func_hash->regex_lock);
|
||||||
|
|
||||||
|
old_hash_ops.filter_hash = old_hash;
|
||||||
|
/* Probes only have filters */
|
||||||
|
old_hash_ops.notrace_hash = NULL;
|
||||||
|
|
||||||
hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
|
hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
|
||||||
if (!hash)
|
if (!hash)
|
||||||
/* Hmm, should report this somehow */
|
/* Hmm, should report this somehow */
|
||||||
@ -3967,12 +3974,17 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
mutex_lock(&ftrace_lock);
|
mutex_lock(&ftrace_lock);
|
||||||
__disable_ftrace_function_probe();
|
disabled = __disable_ftrace_function_probe();
|
||||||
/*
|
/*
|
||||||
* Remove after the disable is called. Otherwise, if the last
|
* Remove after the disable is called. Otherwise, if the last
|
||||||
* probe is removed, a null hash means *all enabled*.
|
* probe is removed, a null hash means *all enabled*.
|
||||||
*/
|
*/
|
||||||
ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
|
ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
|
||||||
|
|
||||||
|
/* still need to update the function call sites */
|
||||||
|
if (ftrace_enabled && !disabled)
|
||||||
|
ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
|
||||||
|
&old_hash_ops);
|
||||||
synchronize_sched();
|
synchronize_sched();
|
||||||
if (!ret)
|
if (!ret)
|
||||||
free_ftrace_hash_rcu(old_hash);
|
free_ftrace_hash_rcu(old_hash);
|
||||||
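The hunks above change __disable_ftrace_function_probe() to report whether it actually shut everything down; when other probes remain, the shared ops cannot be removed, so the caller must still push the filter-hash change out to the patched call sites. A minimal standalone C sketch of that control flow, with hypothetical names standing in for the kernel internals:

#include <stdbool.h>
#include <stdio.h>

static int nprobes = 2;                 /* pretend two probes are registered */

/* mirrors __disable_ftrace_function_probe(): true only on full shutdown */
static bool disable_probes(void)
{
	if (nprobes > 0)
		return false;           /* probes remain; cannot shut down */
	return true;
}

static void update_call_sites(void)     /* stand-in for ftrace_run_modify_code */
{
	puts("re-patching call sites with the old hash");
}

int main(void)
{
	nprobes--;                      /* unregister one of the two probes */
	if (!disable_probes())
		update_call_sites();    /* the step this fix adds */
	return 0;
}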
@ -5554,6 +5566,15 @@ static void clear_ftrace_pids(struct trace_array *tr)
|
|||||||
trace_free_pid_list(pid_list);
|
trace_free_pid_list(pid_list);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void ftrace_clear_pids(struct trace_array *tr)
|
||||||
|
{
|
||||||
|
mutex_lock(&ftrace_lock);
|
||||||
|
|
||||||
|
clear_ftrace_pids(tr);
|
||||||
|
|
||||||
|
mutex_unlock(&ftrace_lock);
|
||||||
|
}
|
||||||
|
|
||||||
static void ftrace_pid_reset(struct trace_array *tr)
|
static void ftrace_pid_reset(struct trace_array *tr)
|
||||||
{
|
{
|
||||||
mutex_lock(&ftrace_lock);
|
mutex_lock(&ftrace_lock);
|
||||||
|
@ -7402,6 +7402,7 @@ static int instance_rmdir(const char *name)
|
|||||||
|
|
||||||
tracing_set_nop(tr);
|
tracing_set_nop(tr);
|
||||||
event_trace_del_tracer(tr);
|
event_trace_del_tracer(tr);
|
||||||
|
ftrace_clear_pids(tr);
|
||||||
ftrace_destroy_function_files(tr);
|
ftrace_destroy_function_files(tr);
|
||||||
tracefs_remove_recursive(tr->dir);
|
tracefs_remove_recursive(tr->dir);
|
||||||
free_trace_buffers(tr);
|
free_trace_buffers(tr);
|
||||||
|
@ -896,6 +896,7 @@ int using_ftrace_ops_list_func(void);
|
|||||||
void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
|
void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
|
||||||
void ftrace_init_tracefs_toplevel(struct trace_array *tr,
|
void ftrace_init_tracefs_toplevel(struct trace_array *tr,
|
||||||
struct dentry *d_tracer);
|
struct dentry *d_tracer);
|
||||||
|
void ftrace_clear_pids(struct trace_array *tr);
|
||||||
#else
|
#else
|
||||||
static inline int ftrace_trace_task(struct trace_array *tr)
|
static inline int ftrace_trace_task(struct trace_array *tr)
|
||||||
{
|
{
|
||||||
@ -914,6 +915,7 @@ ftrace_init_global_array_ops(struct trace_array *tr) { }
|
|||||||
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
|
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
|
||||||
static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
|
static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
|
||||||
static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
|
static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
|
||||||
|
static inline void ftrace_clear_pids(struct trace_array *tr) { }
|
||||||
/* ftace_func_t type is not defined, use macro instead of static inline */
|
/* ftace_func_t type is not defined, use macro instead of static inline */
|
||||||
#define ftrace_init_array_ops(tr, func) do { } while (0)
|
#define ftrace_init_array_ops(tr, func) do { } while (0)
|
||||||
#endif /* CONFIG_FUNCTION_TRACER */
|
#endif /* CONFIG_FUNCTION_TRACER */
|
||||||
@@ -1103,9 +1103,6 @@ config PROVE_LOCKING

	 For more details, see Documentation/locking/lockdep-design.txt.

-config PROVE_LOCKING_SMALL
-	bool
-
 config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -1114,6 +1111,9 @@ config LOCKDEP
 	select KALLSYMS
 	select KALLSYMS_ALL

+config LOCKDEP_SMALL
+	bool
+
 config LOCK_STAT
 	bool "Lock usage statistics"
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
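LOCKDEP_SMALL is a hidden bool that memory-constrained architectures select to shrink lockdep's static tables. The effect is a compile-time size switch along these lines; this is a sketch with illustrative sizes and a placeholder entry type, the real constants live in kernel/locking/lockdep_internals.h:

#include <stdio.h>

struct lock_dep { unsigned long from, to; };   /* placeholder entry type */

#ifdef CONFIG_LOCKDEP_SMALL                    /* selected by sparc */
#define MAX_LOCKDEP_ENTRIES 16384UL            /* reduced static footprint */
#else
#define MAX_LOCKDEP_ENTRIES 32768UL
#endif

static struct lock_dep list_entries[MAX_LOCKDEP_ENTRIES];

int main(void)
{
	/* show how much .bss the dependency table alone consumes */
	printf("dependency table: %zu bytes\n", sizeof(list_entries));
	return 0;
}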
@@ -3817,6 +3817,7 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb,
 	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
 	serr->ee.ee_info = tstype;
 	serr->opt_stats = opt_stats;
+	serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
 	if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
 		serr->ee.ee_data = skb_shinfo(skb)->tskey;
 		if (sk->sk_protocol == IPPROTO_TCP &&
@@ -488,16 +488,15 @@ static bool ipv4_datagram_support_cmsg(const struct sock *sk,
 		return false;

 	/* Support IP_PKTINFO on tstamp packets if requested, to correlate
-	 * timestamp with egress dev. Not possible for packets without dev
+	 * timestamp with egress dev. Not possible for packets without iif
 	 * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
 	 */
-	if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) ||
-	    (!skb->dev))
+	info = PKTINFO_SKB_CB(skb);
+	if (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG) ||
+	    !info->ipi_ifindex)
 		return false;

-	info = PKTINFO_SKB_CB(skb);
 	info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
-	info->ipi_ifindex = skb->dev->ifindex;

 	return true;
 }

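The hunk above stops dereferencing skb->dev when the error queue is drained, since the device can be gone by then; the decision instead keys off the ifindex stashed in the skb control block when the timestamp was generated. Reduced to a standalone predicate with hypothetical types (the struct below models what PKTINFO_SKB_CB points at in the kernel):

#include <stdbool.h>
#include <stdint.h>

struct pktinfo_cb {
	uint32_t ipi_ifindex;   /* captured at timestamp time, 0 if no dev */
};

static bool support_cmsg(bool opt_cmsg_requested,
			 const struct pktinfo_cb *info)
{
	/* no cmsg requested, or the packet never had an egress device */
	if (!opt_cmsg_requested || !info->ipi_ifindex)
		return false;
	return true;
}

int main(void)
{
	struct pktinfo_cb gone = { .ipi_ifindex = 0 };  /* dev disappeared */
	struct pktinfo_cb ok   = { .ipi_ifindex = 2 };
	/* exits 0 when the predicate behaves as the fix intends */
	return support_cmsg(true, &gone) || !support_cmsg(true, &ok);
}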
@ -591,6 +590,7 @@ static bool setsockopt_needs_rtnl(int optname)
|
|||||||
case MCAST_LEAVE_GROUP:
|
case MCAST_LEAVE_GROUP:
|
||||||
case MCAST_LEAVE_SOURCE_GROUP:
|
case MCAST_LEAVE_SOURCE_GROUP:
|
||||||
case MCAST_UNBLOCK_SOURCE:
|
case MCAST_UNBLOCK_SOURCE:
|
||||||
|
case IP_ROUTER_ALERT:
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
return false;
|
return false;
|
||||||
|
@ -1278,7 +1278,7 @@ static void mrtsock_destruct(struct sock *sk)
|
|||||||
struct net *net = sock_net(sk);
|
struct net *net = sock_net(sk);
|
||||||
struct mr_table *mrt;
|
struct mr_table *mrt;
|
||||||
|
|
||||||
rtnl_lock();
|
ASSERT_RTNL();
|
||||||
ipmr_for_each_table(mrt, net) {
|
ipmr_for_each_table(mrt, net) {
|
||||||
if (sk == rtnl_dereference(mrt->mroute_sk)) {
|
if (sk == rtnl_dereference(mrt->mroute_sk)) {
|
||||||
IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
|
IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
|
||||||
@ -1290,7 +1290,6 @@ static void mrtsock_destruct(struct sock *sk)
|
|||||||
mroute_clean_tables(mrt, false);
|
mroute_clean_tables(mrt, false);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
rtnl_unlock();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Socket options and virtual interface manipulation. The whole
|
/* Socket options and virtual interface manipulation. The whole
|
||||||
@ -1355,13 +1354,8 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
|
|||||||
if (sk != rcu_access_pointer(mrt->mroute_sk)) {
|
if (sk != rcu_access_pointer(mrt->mroute_sk)) {
|
||||||
ret = -EACCES;
|
ret = -EACCES;
|
||||||
} else {
|
} else {
|
||||||
/* We need to unlock here because mrtsock_destruct takes
|
|
||||||
* care of rtnl itself and we can't change that due to
|
|
||||||
* the IP_ROUTER_ALERT setsockopt which runs without it.
|
|
||||||
*/
|
|
||||||
rtnl_unlock();
|
|
||||||
ret = ip_ra_control(sk, 0, NULL);
|
ret = ip_ra_control(sk, 0, NULL);
|
||||||
goto out;
|
goto out_unlock;
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case MRT_ADD_VIF:
|
case MRT_ADD_VIF:
|
||||||
@ -1472,7 +1466,6 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
|
|||||||
}
|
}
|
||||||
out_unlock:
|
out_unlock:
|
||||||
rtnl_unlock();
|
rtnl_unlock();
|
||||||
out:
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -682,7 +682,9 @@ static void raw_close(struct sock *sk, long timeout)
|
|||||||
/*
|
/*
|
||||||
* Raw sockets may have direct kernel references. Kill them.
|
* Raw sockets may have direct kernel references. Kill them.
|
||||||
*/
|
*/
|
||||||
|
rtnl_lock();
|
||||||
ip_ra_control(sk, 0, NULL);
|
ip_ra_control(sk, 0, NULL);
|
||||||
|
rtnl_unlock();
|
||||||
|
|
||||||
sk_common_release(sk);
|
sk_common_release(sk);
|
||||||
}
|
}
|
||||||
@@ -405,9 +405,6 @@ static inline bool ipv6_datagram_support_addr(struct sock_exterr_skb *serr)
  * At one point, excluding local errors was a quick test to identify icmp/icmp6
  * errors. This is no longer true, but the test remained, so the v6 stack,
  * unlike v4, also honors cmsg requests on all wifi and timestamp errors.
- *
- * Timestamp code paths do not initialize the fields expected by cmsg:
- * the PKTINFO fields in skb->cb[]. Fill those in here.
  */
 static bool ip6_datagram_support_cmsg(struct sk_buff *skb,
 				      struct sock_exterr_skb *serr)
@@ -419,14 +416,9 @@ static bool ip6_datagram_support_cmsg(struct sk_buff *skb,
 	if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL)
 		return false;

-	if (!skb->dev)
+	if (!IP6CB(skb)->iif)
 		return false;

-	if (skb->protocol == htons(ETH_P_IPV6))
-		IP6CB(skb)->iif = skb->dev->ifindex;
-	else
-		PKTINFO_SKB_CB(skb)->ipi_ifindex = skb->dev->ifindex;
-
 	return true;
 }

@@ -124,11 +124,14 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
 			max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
 	/*
 	 * RFC4291 2.5.3
+	 * The loopback address must not be used as the source address in IPv6
+	 * packets that are sent outside of a single node. [..]
 	 * A packet received on an interface with a destination address
 	 * of loopback must be dropped.
 	 */
-	if (!(dev->flags & IFF_LOOPBACK) &&
-	    ipv6_addr_loopback(&hdr->daddr))
+	if ((ipv6_addr_loopback(&hdr->saddr) ||
+	     ipv6_addr_loopback(&hdr->daddr)) &&
+	    !(dev->flags & IFF_LOOPBACK))
 		goto err;

 	/* RFC4291 Errata ID: 3480
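The reordered condition now also rejects a loopback source address on non-loopback interfaces, per RFC 4291 2.5.3. The same rule as a self-contained userspace predicate, using the POSIX IN6_IS_ADDR_LOOPBACK macro rather than the kernel's ipv6_addr_loopback():

#include <stdbool.h>
#include <netinet/in.h>

/* drop if ::1 appears as source or destination on a non-loopback device */
static bool must_drop(const struct in6_addr *saddr,
		      const struct in6_addr *daddr,
		      bool dev_is_loopback)
{
	return (IN6_IS_ADDR_LOOPBACK(saddr) || IN6_IS_ADDR_LOOPBACK(daddr)) &&
	       !dev_is_loopback;
}

int main(void)
{
	struct in6_addr lo  = IN6ADDR_LOOPBACK_INIT;   /* ::1 */
	struct in6_addr any = IN6ADDR_ANY_INIT;        /* ::  */
	/* exits 0 when ::1 is dropped off-loopback and :: is not */
	return !must_drop(&lo, &any, false) || must_drop(&any, &any, false);
}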
tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc (new file, 117 lines)
@@ -0,0 +1,117 @@
+#!/bin/sh
+# description: ftrace - function pid filters
+
+# Make sure that function pid matching filter works.
+# Also test it on an instance directory
+
+if ! grep -q function available_tracers; then
+    echo "no function tracer configured"
+    exit_unsupported
+fi
+
+if [ ! -f set_ftrace_pid ]; then
+    echo "set_ftrace_pid not found? Is function tracer not set?"
+    exit_unsupported
+fi
+
+if [ ! -f set_ftrace_filter ]; then
+    echo "set_ftrace_filter not found? Is function tracer not set?"
+    exit_unsupported
+fi
+
+do_function_fork=1
+
+if [ ! -f options/function-fork ]; then
+    do_function_fork=0
+    echo "no option for function-fork found. Option will not be tested."
+fi
+
+read PID _ < /proc/self/stat
+
+if [ $do_function_fork -eq 1 ]; then
+    # default value of function-fork option
+    orig_value=`grep function-fork trace_options`
+fi
+
+do_reset() {
+    reset_tracer
+    clear_trace
+    enable_tracing
+    echo > set_ftrace_filter
+    echo > set_ftrace_pid
+
+    if [ $do_function_fork -eq 0 ]; then
+        return
+    fi
+
+    echo $orig_value > trace_options
+}
+
+fail() { # msg
+    do_reset
+    echo $1
+    exit $FAIL
+}
+
+yield() {
+    ping localhost -c 1 || sleep .001 || usleep 1 || sleep 1
+}
+
+do_test() {
+    disable_tracing
+
+    echo do_execve* > set_ftrace_filter
+    echo *do_fork >> set_ftrace_filter
+
+    echo $PID > set_ftrace_pid
+    echo function > current_tracer
+
+    if [ $do_function_fork -eq 1 ]; then
+        # don't allow children to be traced
+        echo nofunction-fork > trace_options
+    fi
+
+    enable_tracing
+    yield
+
+    count_pid=`cat trace | grep -v ^# | grep $PID | wc -l`
+    count_other=`cat trace | grep -v ^# | grep -v $PID | wc -l`
+
+    # count_other should be 0
+    if [ $count_pid -eq 0 -o $count_other -ne 0 ]; then
+        fail "PID filtering not working?"
+    fi
+
+    disable_tracing
+    clear_trace
+
+    if [ $do_function_fork -eq 0 ]; then
+        return
+    fi
+
+    # allow children to be traced
+    echo function-fork > trace_options
+
+    enable_tracing
+    yield
+
+    count_pid=`cat trace | grep -v ^# | grep $PID | wc -l`
+    count_other=`cat trace | grep -v ^# | grep -v $PID | wc -l`
+
+    # count_other should NOT be 0
+    if [ $count_pid -eq 0 -o $count_other -eq 0 ]; then
+        fail "PID filtering not following fork?"
+    fi
+}
+
+do_test
+
+mkdir instances/foo
+cd instances/foo
+do_test
+cd ../../
+rmdir instances/foo
+
+do_reset
+
+exit 0
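The new selftest is written against the ftrace test harness in tools/testing/selftests/ftrace: helpers such as reset_tracer, enable_tracing, clear_trace and exit_unsupported come from the harness's shared functions, and ftracetest runs each .tc script with the tracefs directory as its working directory, which is why bare names like set_ftrace_pid and trace_options resolve without a path.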