Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/usb/qmi_wwan.c

Overlapping additions of new device IDs to qmi_wwan.c

Signed-off-by: David S. Miller <davem@davemloft.net>
commit dc25b25897
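The conflicted file itself does not appear in the diff below; the resolution simply kept both sides' table entries. As a hypothetical sketch (vendor/product IDs made up for illustration), an overlap of this kind arises when both parents append to the same device-ID table in qmi_wwan.c:

	<<<<<<< HEAD
		{QMI_FIXED_INTF(0x1111, 0x2222, 4)},	/* hypothetical net-next addition */
	=======
		{QMI_FIXED_INTF(0x3333, 0x4444, 8)},	/* hypothetical net addition */
	>>>>>>> net

	/* merge resolution: keep both new entries */
	{QMI_FIXED_INTF(0x1111, 0x2222, 4)},	/* hypothetical */
	{QMI_FIXED_INTF(0x3333, 0x4444, 8)},	/* hypothetical */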
--- a/.mailmap
+++ b/.mailmap
@@ -17,6 +17,7 @@ Aleksey Gorelov <aleksey_gorelov@phoenix.com>
 Al Viro <viro@ftp.linux.org.uk>
 Al Viro <viro@zenIV.linux.org.uk>
 Andreas Herrmann <aherrman@de.ibm.com>
+Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrew Morton <akpm@linux-foundation.org>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
 Andy Adamson <andros@citi.umich.edu>
@@ -199,6 +199,7 @@ nodes to be present and contain the properties described below.
 		    "qcom,kpss-acc-v1"
 		    "qcom,kpss-acc-v2"
 		    "rockchip,rk3066-smp"
+		    "ste,dbx500-smp"
 
 - cpu-release-addr
 	Usage: required for systems that have an "enable-method"
@@ -3588,6 +3588,15 @@ S:	Maintained
 F:	drivers/gpu/drm/rockchip/
 F:	Documentation/devicetree/bindings/video/rockchip*
 
+DRM DRIVERS FOR STI
+M:	Benjamin Gaignard <benjamin.gaignard@linaro.org>
+M:	Vincent Abriou <vincent.abriou@st.com>
+L:	dri-devel@lists.freedesktop.org
+T:	git http://git.linaro.org/people/benjamin.gaignard/kernel.git
+S:	Maintained
+F:	drivers/gpu/drm/sti
+F:	Documentation/devicetree/bindings/gpu/st,stih4xx.txt
+
 DSBR100 USB FM RADIO DRIVER
 M:	Alexey Klimov <klimov.linux@gmail.com>
 L:	linux-media@vger.kernel.org
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
@@ -116,7 +116,7 @@
 			ranges = <0 0x2000 0x2000>;
 
 			scm_conf: scm_conf@0 {
-				compatible = "syscon";
+				compatible = "syscon", "simple-bus";
 				reg = <0x0 0x1400>;
 				#address-cells = <1>;
 				#size-cells = <1>;

@@ -181,10 +181,10 @@
 			interrupt-names = "msi";
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 0x7>;
-			interrupt-map = <0 0 0 1 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
-			                <0 0 0 2 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
-			                <0 0 0 3 &intc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
-			                <0 0 0 4 &intc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-map = <0 0 0 1 &gpc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+			                <0 0 0 2 &gpc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+			                <0 0 0 3 &gpc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+			                <0 0 0 4 &gpc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clks IMX6QDL_CLK_PCIE_AXI>,
 			         <&clks IMX6QDL_CLK_LVDS1_GATE>,
 			         <&clks IMX6QDL_CLK_PCIE_REF_125M>;
@@ -131,10 +131,17 @@
 				<GIC_SPI 376 IRQ_TYPE_EDGE_RISING>;
 		};
 	};
+
+	mdio: mdio@24200f00 {
+		compatible = "ti,keystone_mdio", "ti,davinci_mdio";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x24200f00 0x100>;
+		status = "disabled";
+		clocks = <&clkcpgmac>;
+		clock-names = "fck";
+		bus_freq = <2500000>;
+	};
 	/include/ "k2e-netcp.dtsi"
 	};
 };
-
-&mdio {
-	reg = <0x24200f00 0x100>;
-};
@@ -98,6 +98,17 @@
 		#gpio-cells = <2>;
 		gpio,syscon-dev = <&devctrl 0x25c>;
 	};
+
+	mdio: mdio@02090300 {
+		compatible = "ti,keystone_mdio", "ti,davinci_mdio";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x02090300 0x100>;
+		status = "disabled";
+		clocks = <&clkcpgmac>;
+		clock-names = "fck";
+		bus_freq = <2500000>;
+	};
 	/include/ "k2hk-netcp.dtsi"
 	};
 };
@@ -29,7 +29,6 @@
 	};
 
 	soc {
-
 		/include/ "k2l-clocks.dtsi"
 
 		uart2: serial@02348400 {

@@ -79,6 +78,17 @@
 		#gpio-cells = <2>;
 		gpio,syscon-dev = <&devctrl 0x24c>;
 	};
+
+	mdio: mdio@26200f00 {
+		compatible = "ti,keystone_mdio", "ti,davinci_mdio";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x26200f00 0x100>;
+		status = "disabled";
+		clocks = <&clkcpgmac>;
+		clock-names = "fck";
+		bus_freq = <2500000>;
+	};
 	/include/ "k2l-netcp.dtsi"
 	};
 };

@@ -96,7 +106,3 @@
 		/* Pin muxed. Enabled and configured by Bootloader */
 		status = "disabled";
 	};
-
-&mdio {
-	reg = <0x26200f00 0x100>;
-};
@@ -267,17 +267,6 @@
 			1 0 0x21000A00 0x00000100>;
 		};
 
-		mdio: mdio@02090300 {
-			compatible = "ti,keystone_mdio", "ti,davinci_mdio";
-			#address-cells = <1>;
-			#size-cells = <0>;
-			reg = <0x02090300 0x100>;
-			status = "disabled";
-			clocks = <&clkpa>;
-			clock-names = "fck";
-			bus_freq = <2500000>;
-		};
-
 		kirq0: keystone_irq@26202a0 {
 			compatible = "ti,keystone-irq";
 			interrupts = <GIC_SPI 4 IRQ_TYPE_EDGE_RISING>;
@@ -51,7 +51,8 @@
 		};
 
 		scm_conf: scm_conf@270 {
-			compatible = "syscon";
+			compatible = "syscon",
+				     "simple-bus";
 			reg = <0x270 0x240>;
 			#address-cells = <1>;
 			#size-cells = <1>;

@@ -191,7 +191,8 @@
 		};
 
 		omap4_padconf_global: omap4_padconf_global@5a0 {
-			compatible = "syscon";
+			compatible = "syscon",
+				     "simple-bus";
 			reg = <0x5a0 0x170>;
 			#address-cells = <1>;
 			#size-cells = <1>;

@@ -180,7 +180,8 @@
 		};
 
 		omap5_padconf_global: omap5_padconf_global@5a0 {
-			compatible = "syscon";
+			compatible = "syscon",
+				     "simple-bus";
 			reg = <0x5a0 0xec>;
 			#address-cells = <1>;
 			#size-cells = <1>;
@@ -15,6 +15,33 @@
 #include "skeleton.dtsi"
 
 / {
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		enable-method = "ste,dbx500-smp";
+
+		cpu-map {
+			cluster0 {
+				core0 {
+					cpu = <&CPU0>;
+				};
+				core1 {
+					cpu = <&CPU1>;
+				};
+			};
+		};
+		CPU0: cpu@300 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a9";
+			reg = <0x300>;
+		};
+		CPU1: cpu@301 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a9";
+			reg = <0x301>;
+		};
+	};
+
 	soc {
 		#address-cells = <1>;
 		#size-cells = <1>;

@@ -22,32 +49,6 @@
 		interrupt-parent = <&intc>;
 		ranges;
 
-		cpus {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			cpu-map {
-				cluster0 {
-					core0 {
-						cpu = <&CPU0>;
-					};
-					core1 {
-						cpu = <&CPU1>;
-					};
-				};
-			};
-			CPU0: cpu@0 {
-				device_type = "cpu";
-				compatible = "arm,cortex-a9";
-				reg = <0>;
-			};
-			CPU1: cpu@1 {
-				device_type = "cpu";
-				compatible = "arm,cortex-a9";
-				reg = <1>;
-			};
-		};
-
 		ptm@801ae000 {
 			compatible = "arm,coresight-etm3x", "arm,primecell";
 			reg = <0x801ae000 0x1000>;
@@ -61,6 +61,7 @@ work_pending:
 	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
 	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
 	b	local_restart			@ ... and off we go
+ENDPROC(ret_fast_syscall)
 
 /*
  * "slow" syscall return path.  "why" tells us if this was a real syscall.
@@ -399,6 +399,9 @@ ENTRY(secondary_startup)
 	sub	lr, r4, r5			@ mmu has been enabled
 	add	r3, r7, lr
 	ldrd	r4, [r3, #0]			@ get secondary_data.pgdir
+ARM_BE8(eor	r4, r4, r5)			@ Swap r5 and r4 in BE:
+ARM_BE8(eor	r5, r4, r5)			@ it can be done in 3 steps
+ARM_BE8(eor	r4, r4, r5)			@ without using a temp reg.
 	ldr	r8, [r3, #8]			@ get secondary_data.swapper_pg_dir
 	badr	lr, __enable_mmu		@ return address
 	mov	r13, r12			@ __secondary_switched address
@@ -296,7 +296,6 @@ static bool tk_is_cntvct(const struct timekeeper *tk)
  */
 void update_vsyscall(struct timekeeper *tk)
 {
-	struct timespec xtime_coarse;
 	struct timespec64 *wtm = &tk->wall_to_monotonic;
 
 	if (!cntvct_ok) {

@@ -308,10 +307,10 @@ void update_vsyscall(struct timekeeper *tk)
 
 	vdso_write_begin(vdso_data);
 
-	xtime_coarse = __current_kernel_time();
 	vdso_data->tk_is_cntvct		= tk_is_cntvct(tk);
-	vdso_data->xtime_coarse_sec	= xtime_coarse.tv_sec;
-	vdso_data->xtime_coarse_nsec	= xtime_coarse.tv_nsec;
+	vdso_data->xtime_coarse_sec	= tk->xtime_sec;
+	vdso_data->xtime_coarse_nsec	= (u32)(tk->tkr_mono.xtime_nsec >>
+						tk->tkr_mono.shift);
 	vdso_data->wtm_clock_sec	= wtm->tv_sec;
 	vdso_data->wtm_clock_nsec	= wtm->tv_nsec;
 
@@ -146,9 +146,8 @@ static __init int exynos4_pm_init_power_domain(void)
 		pd->base = of_iomap(np, 0);
 		if (!pd->base) {
 			pr_warn("%s: failed to map memory\n", __func__);
-			kfree(pd->pd.name);
+			kfree_const(pd->pd.name);
 			kfree(pd);
-			of_node_put(np);
 			continue;
 		}
 
@@ -14,7 +14,7 @@ VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
 VDSO_LDFLAGS += -nostdlib -shared
 VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
 VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id)
-VDSO_LDFLAGS += $(call cc-option, -fuse-ld=bfd)
+VDSO_LDFLAGS += $(call cc-ldoption, -fuse-ld=bfd)
 
 obj-$(CONFIG_VDSO) += vdso.o
 extra-$(CONFIG_VDSO) += vdso.lds
@@ -80,7 +80,7 @@ syscall_trace_entry:
 	SAVE_STATIC
 	move	s0, t2
 	move	a0, sp
-	daddiu	a1, v0, __NR_64_Linux
+	move	a1, v0
 	jal	syscall_trace_enter
 
 	bltz	v0, 2f			# seccomp failed? Skip syscall

@@ -72,7 +72,7 @@ n32_syscall_trace_entry:
 	SAVE_STATIC
 	move	s0, t2
 	move	a0, sp
-	daddiu	a1, v0, __NR_N32_Linux
+	move	a1, v0
 	jal	syscall_trace_enter
 
 	bltz	v0, 2f			# seccomp failed? Skip syscall
@@ -140,6 +140,7 @@ sysexit_from_sys_call:
 	 */
 	andl	$~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
 	movl	RIP(%rsp), %ecx		/* User %eip */
+	movq	RAX(%rsp), %rax
 	RESTORE_RSI_RDI
 	xorl	%edx, %edx		/* Do not leak kernel information */
 	xorq	%r8, %r8

@@ -219,7 +220,6 @@ sysexit_from_sys_call:
 1:	setbe	%al			/* 1 if error, 0 if not */
 	movzbl	%al, %edi		/* zero-extend that into %edi */
 	call	__audit_syscall_exit
-	movq	RAX(%rsp), %rax		/* reload syscall return value */
 	movl	$(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF

@@ -368,6 +368,7 @@ sysretl_from_sys_call:
 	RESTORE_RSI_RDI_RDX
 	movl	RIP(%rsp), %ecx
 	movl	EFLAGS(%rsp), %r11d
+	movq	RAX(%rsp), %rax
 	xorq	%r10, %r10
 	xorq	%r9, %r9
 	xorq	%r8, %r8
@@ -2534,7 +2534,7 @@ static int intel_pmu_cpu_prepare(int cpu)
 	if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
 		cpuc->shared_regs = allocate_shared_regs(cpu);
 		if (!cpuc->shared_regs)
-			return NOTIFY_BAD;
+			goto err;
 	}
 
 	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {

@@ -2542,18 +2542,27 @@ static int intel_pmu_cpu_prepare(int cpu)
 
 		cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
 		if (!cpuc->constraint_list)
-			return NOTIFY_BAD;
+			goto err_shared_regs;
 
 		cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
-		if (!cpuc->excl_cntrs) {
-			kfree(cpuc->constraint_list);
-			kfree(cpuc->shared_regs);
-			return NOTIFY_BAD;
-		}
+		if (!cpuc->excl_cntrs)
+			goto err_constraint_list;
+
 		cpuc->excl_thread_id = 0;
 	}
 
 	return NOTIFY_OK;
+
+err_constraint_list:
+	kfree(cpuc->constraint_list);
+	cpuc->constraint_list = NULL;
+
+err_shared_regs:
+	kfree(cpuc->shared_regs);
+	cpuc->shared_regs = NULL;
+
+err:
+	return NOTIFY_BAD;
 }
 
 static void intel_pmu_cpu_starting(int cpu)
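The intel_pmu_cpu_prepare() rework above is the kernel's standard goto-unwind error-handling shape: each failure point jumps to a label that frees only what was successfully set up before it, in reverse order. A minimal self-contained C sketch of the pattern (the struct fields and allocation sizes are hypothetical, not the driver's API):

	#include <stdlib.h>

	struct ctx { void *a; void *b; };

	/* Returns 0 on success; on failure, frees any partial allocation. */
	static int ctx_init(struct ctx *c)
	{
		c->a = malloc(64);
		if (!c->a)
			goto err;

		c->b = malloc(128);
		if (!c->b)
			goto err_a;	/* unwind in reverse order of setup */

		return 0;

	err_a:
		free(c->a);
		c->a = NULL;
	err:
		return -1;
	}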
@@ -1255,7 +1255,7 @@ static inline void cqm_pick_event_reader(int cpu)
 	cpumask_set_cpu(cpu, &cqm_cpumask);
 }
 
-static void intel_cqm_cpu_prepare(unsigned int cpu)
+static void intel_cqm_cpu_starting(unsigned int cpu)
 {
 	struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
 	struct cpuinfo_x86 *c = &cpu_data(cpu);

@@ -1296,13 +1296,11 @@ static int intel_cqm_cpu_notifier(struct notifier_block *nb,
 	unsigned int cpu = (unsigned long)hcpu;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		intel_cqm_cpu_prepare(cpu);
-		break;
 	case CPU_DOWN_PREPARE:
 		intel_cqm_cpu_exit(cpu);
 		break;
 	case CPU_STARTING:
+		intel_cqm_cpu_starting(cpu);
 		cqm_pick_event_reader(cpu);
 		break;
 	}

@@ -1373,7 +1371,7 @@ static int __init intel_cqm_init(void)
 		goto out;
 
 	for_each_online_cpu(i) {
-		intel_cqm_cpu_prepare(i);
+		intel_cqm_cpu_starting(i);
 		cqm_pick_event_reader(i);
 	}
 
@@ -28,11 +28,11 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
 		struct desc_struct *desc;
 		unsigned long base;
 
-		seg &= ~7UL;
+		seg >>= 3;
 
 		mutex_lock(&child->mm->context.lock);
 		if (unlikely(!child->mm->context.ldt ||
-			     (seg >> 3) >= child->mm->context.ldt->size))
+			     seg >= child->mm->context.ldt->size))
 			addr = -1L; /* bogus selector, access would fault */
 		else {
 			desc = &child->mm->context.ldt->entries[seg];
@@ -2105,7 +2105,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (guest_cpuid_has_tsc_adjust(vcpu)) {
 			if (!msr_info->host_initiated) {
 				s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
-				kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
+				adjust_tsc_offset_guest(vcpu, adj);
 			}
 			vcpu->arch.ia32_tsc_adjust_msr = data;
 		}

@@ -6327,6 +6327,7 @@ static void process_smi_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 static void process_smi(struct kvm_vcpu *vcpu)
 {
 	struct kvm_segment cs, ds;
+	struct desc_ptr dt;
 	char buf[512];
 	u32 cr0;
 

@@ -6359,6 +6360,10 @@ static void process_smi(struct kvm_vcpu *vcpu)
 
 	kvm_x86_ops->set_cr4(vcpu, 0);
 
+	/* Undocumented: IDT limit is set to zero on entry to SMM. */
+	dt.address = dt.size = 0;
+	kvm_x86_ops->set_idt(vcpu, &dt);
+
 	__kvm_set_dr(vcpu, 7, DR7_FIXED_1);
 
 	cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
@@ -29,7 +29,6 @@
 
 #include <asm/uaccess.h>
 #include <asm/traps.h>
-#include <asm/desc.h>
 #include <asm/user.h>
 #include <asm/fpu/internal.h>
 

@@ -181,7 +180,7 @@ void math_emulate(struct math_emu_info *info)
 		math_abort(FPU_info, SIGILL);
 	}
 
-	code_descriptor = LDT_DESCRIPTOR(FPU_CS);
+	code_descriptor = FPU_get_ldt_descriptor(FPU_CS);
 	if (SEG_D_SIZE(code_descriptor)) {
 		/* The above test may be wrong, the book is not clear */
 		/* Segmented 32 bit protected mode */
@@ -16,9 +16,24 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 
-/* s is always from a cpu register, and the cpu does bounds checking
- * during register load --> no further bounds checks needed */
-#define LDT_DESCRIPTOR(s)	(((struct desc_struct *)current->mm->context.ldt)[(s) >> 3])
+#include <asm/desc.h>
+#include <asm/mmu_context.h>
+
+static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg)
+{
+	static struct desc_struct zero_desc;
+	struct desc_struct ret = zero_desc;
+
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+	seg >>= 3;
+	mutex_lock(&current->mm->context.lock);
+	if (current->mm->context.ldt && seg < current->mm->context.ldt->size)
+		ret = current->mm->context.ldt->entries[seg];
+	mutex_unlock(&current->mm->context.lock);
+#endif
+	return ret;
+}
+
 #define SEG_D_SIZE(x)	((x).b & (3 << 21))
 #define SEG_G_BIT(x)	((x).b & (1 << 23))
 #define SEG_GRANULARITY(x)	(((x).b & (1 << 23)) ? 4096 : 1)
@@ -20,7 +20,6 @@
 #include <linux/stddef.h>
 
 #include <asm/uaccess.h>
-#include <asm/desc.h>
 
 #include "fpu_system.h"
 #include "exception.h"

@@ -158,7 +157,7 @@ static long pm_address(u_char FPU_modrm, u_char segment,
 		addr->selector = PM_REG_(segment);
 	}
 
-	descriptor = LDT_DESCRIPTOR(PM_REG_(segment));
+	descriptor = FPU_get_ldt_descriptor(addr->selector);
 	base_address = SEG_BASE_ADDR(descriptor);
 	address = base_address + offset;
 	limit = base_address
@@ -8,7 +8,7 @@ config XEN
 	select PARAVIRT_CLOCK
 	select XEN_HAVE_PVMMU
 	depends on X86_64 || (X86_32 && X86_PAE)
-	depends on X86_TSC
+	depends on X86_LOCAL_APIC && X86_TSC
 	help
 	  This is the Linux Xen port.  Enabling this will allow the
 	  kernel to boot in a paravirtualized environment under the

@@ -17,7 +17,7 @@ config XEN
 config XEN_DOM0
 	def_bool y
 	depends on XEN && PCI_XEN && SWIOTLB_XEN
-	depends on X86_LOCAL_APIC && X86_IO_APIC && ACPI && PCI
+	depends on X86_IO_APIC && ACPI && PCI
 
 config XEN_PVHVM
 	def_bool y
@@ -241,8 +241,8 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
  * Description:
  *    Enables a low level driver to set a hard upper limit,
  *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
- *    the device driver based upon the combined capabilities of I/O
- *    controller and storage device.
+ *    the device driver based upon the capabilities of the I/O
+ *    controller.
  *
  *    max_sectors is a soft limit imposed by the block layer for
  *    filesystem type requests.  This value can be overridden on a
@@ -393,8 +393,6 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
 	struct scatterlist *cipher = areq_ctx->cipher;
 	struct scatterlist *hsg = areq_ctx->hsg;
 	struct scatterlist *tsg = areq_ctx->tsg;
-	struct scatterlist *assoc1;
-	struct scatterlist *assoc2;
 	unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
 	unsigned int cryptlen = req->cryptlen;
 	struct page *dstp;

@@ -412,27 +410,19 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
 		cryptlen += ivsize;
 	}
 
-	if (sg_is_last(assoc))
-		return -EINVAL;
-
-	assoc1 = assoc + 1;
-	if (sg_is_last(assoc1))
-		return -EINVAL;
-
-	assoc2 = assoc + 2;
-	if (!sg_is_last(assoc2))
+	if (assoc->length < 12)
 		return -EINVAL;
 
 	sg_init_table(hsg, 2);
-	sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
-	sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+	sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
+	sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
 
 	sg_init_table(tsg, 1);
-	sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+	sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
 
 	areq_ctx->cryptlen = cryptlen;
-	areq_ctx->headlen = assoc->length + assoc2->length;
-	areq_ctx->trailen = assoc1->length;
+	areq_ctx->headlen = 8;
+	areq_ctx->trailen = 4;
 	areq_ctx->sg = dst;
 
 	areq_ctx->complete = authenc_esn_geniv_ahash_done;

@@ -563,8 +553,6 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
 	struct scatterlist *cipher = areq_ctx->cipher;
 	struct scatterlist *hsg = areq_ctx->hsg;
 	struct scatterlist *tsg = areq_ctx->tsg;
-	struct scatterlist *assoc1;
-	struct scatterlist *assoc2;
 	unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
 	struct page *srcp;
 	u8 *vsrc;

@@ -580,27 +568,19 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
 		cryptlen += ivsize;
 	}
 
-	if (sg_is_last(assoc))
-		return -EINVAL;
-
-	assoc1 = assoc + 1;
-	if (sg_is_last(assoc1))
-		return -EINVAL;
-
-	assoc2 = assoc + 2;
-	if (!sg_is_last(assoc2))
+	if (assoc->length < 12)
 		return -EINVAL;
 
 	sg_init_table(hsg, 2);
-	sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
-	sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+	sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
+	sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
 
 	sg_init_table(tsg, 1);
-	sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+	sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
 
 	areq_ctx->cryptlen = cryptlen;
-	areq_ctx->headlen = assoc->length + assoc2->length;
-	areq_ctx->trailen = assoc1->length;
+	areq_ctx->headlen = 8;
+	areq_ctx->trailen = 4;
 	areq_ctx->sg = src;
 
 	areq_ctx->complete = authenc_esn_verify_ahash_done;
@@ -32,6 +32,7 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/types.h>
+#include <linux/workqueue.h>
 #include <acpi/video.h>
 
 ACPI_MODULE_NAME("video");

@@ -41,6 +42,7 @@ void acpi_video_unregister_backlight(void);
 
 static bool backlight_notifier_registered;
 static struct notifier_block backlight_nb;
+static struct work_struct backlight_notify_work;
 
 static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef;
 static enum acpi_backlight_type acpi_backlight_dmi = acpi_backlight_undef;

@@ -262,6 +264,13 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
 	{ },
 };
 
+/* This uses a workqueue to avoid various locking ordering issues */
+static void acpi_video_backlight_notify_work(struct work_struct *work)
+{
+	if (acpi_video_get_backlight_type() != acpi_backlight_video)
+		acpi_video_unregister_backlight();
+}
+
 static int acpi_video_backlight_notify(struct notifier_block *nb,
 				       unsigned long val, void *bd)
 {

@@ -269,9 +278,8 @@ static int acpi_video_backlight_notify(struct notifier_block *nb,
 
 	/* A raw bl registering may change video -> native */
 	if (backlight->props.type == BACKLIGHT_RAW &&
-	    val == BACKLIGHT_REGISTERED &&
-	    acpi_video_get_backlight_type() != acpi_backlight_video)
-		acpi_video_unregister_backlight();
+	    val == BACKLIGHT_REGISTERED)
+		schedule_work(&backlight_notify_work);
 
 	return NOTIFY_OK;
 }

@@ -304,6 +312,8 @@ enum acpi_backlight_type acpi_video_get_backlight_type(void)
 		acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
 				    ACPI_UINT32_MAX, find_video, NULL,
 				    &video_caps, NULL);
+		INIT_WORK(&backlight_notify_work,
+			  acpi_video_backlight_notify_work);
 		backlight_nb.notifier_call = acpi_video_backlight_notify;
 		backlight_nb.priority = 0;
 		if (backlight_register_notifier(&backlight_nb) == 0)
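The video_detect.c hunks above move the backlight unregister out of the notifier callback and into a work item, since calling back into the backlight core from inside its own notifier chain can trip lock-ordering problems. A minimal sketch of that defer-to-workqueue pattern, using only the standard workqueue/notifier APIs (the function names here are hypothetical):

	#include <linux/workqueue.h>
	#include <linux/notifier.h>

	static struct work_struct deferred_work;

	static void deferred_work_fn(struct work_struct *work)
	{
		/* Runs later in process context, outside the notifier's locks. */
	}

	static int my_notifier_cb(struct notifier_block *nb, unsigned long val,
				  void *data)
	{
		schedule_work(&deferred_work);	/* cheap and safe from a callback */
		return NOTIFY_OK;
	}

	/* At init time, before registering the notifier:
	 *	INIT_WORK(&deferred_work, deferred_work_fn);
	 */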
@@ -92,7 +92,7 @@ static inline u32 brcm_sata_readreg(void __iomem *addr)
 	 * Other architectures (e.g., ARM) either do not support big endian, or
 	 * else leave I/O in little endian mode.
 	 */
-	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
 		return __raw_readl(addr);
 	else
 		return readl_relaxed(addr);

@@ -101,7 +101,7 @@ static inline u32 brcm_sata_readreg(void __iomem *addr)
 static inline void brcm_sata_writereg(u32 val, void __iomem *addr)
 {
 	/* See brcm_sata_readreg() comments */
-	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
 		__raw_writel(val, addr);
 	else
 		writel_relaxed(val, addr);

@@ -209,6 +209,7 @@ static void brcm_sata_init(struct brcm_ahci_priv *priv)
 		      priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL);
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int brcm_ahci_suspend(struct device *dev)
 {
 	struct ata_host *host = dev_get_drvdata(dev);

@@ -231,6 +232,7 @@ static int brcm_ahci_resume(struct device *dev)
 	brcm_sata_phys_enable(priv);
 	return ahci_platform_resume(dev);
 }
+#endif
 
 static struct scsi_host_template ahci_platform_sht = {
 	AHCI_SHT(DRV_NAME),
@@ -694,11 +694,11 @@ static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
  *	RETURNS:
  *	Block address read from @tf.
  */
-u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
+u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
 {
 	u64 block = 0;
 
-	if (!dev || tf->flags & ATA_TFLAG_LBA) {
+	if (tf->flags & ATA_TFLAG_LBA) {
 		if (tf->flags & ATA_TFLAG_LBA48) {
 			block |= (u64)tf->hob_lbah << 40;
 			block |= (u64)tf->hob_lbam << 32;

@@ -2147,24 +2147,6 @@ static int ata_dev_config_ncq(struct ata_device *dev,
 	return 0;
 }
 
-static void ata_dev_config_sense_reporting(struct ata_device *dev)
-{
-	unsigned int err_mask;
-
-	if (!ata_id_has_sense_reporting(dev->id))
-		return;
-
-	if (ata_id_sense_reporting_enabled(dev->id))
-		return;
-
-	err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
-	if (err_mask) {
-		ata_dev_dbg(dev,
-			    "failed to enable Sense Data Reporting, Emask 0x%x\n",
-			    err_mask);
-	}
-}
-
 /**
  *	ata_dev_configure - Configure the specified ATA/ATAPI device
  *	@dev: Target device to configure

@@ -2387,7 +2369,7 @@ int ata_dev_configure(struct ata_device *dev)
 					dev->devslp_timing[i] = sata_setting[j];
 			}
 		}
-		ata_dev_config_sense_reporting(dev);
 		dev->cdb_len = 16;
 	}
 
@@ -1592,8 +1592,6 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
 	tf->hob_lbah = buf[10];
 	tf->nsect = buf[12];
 	tf->hob_nsect = buf[13];
-	if (ata_id_has_ncq_autosense(dev->id))
-		tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
 
 	return 0;
 }

@@ -1629,70 +1627,6 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
 	return err_mask;
 }
 
-/**
- *	ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
- *	@dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to
- *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
- *	@dfl_sense_key: default sense key to use
- *
- *	Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
- *	SENSE.  This function is EH helper.
- *
- *	LOCKING:
- *	Kernel thread context (may sleep).
- *
- *	RETURNS:
- *	encoded sense data on success, 0 on failure or if sense data
- *	is not available.
- */
-static u32 ata_eh_request_sense(struct ata_queued_cmd *qc,
-				struct scsi_cmnd *cmd)
-{
-	struct ata_device *dev = qc->dev;
-	struct ata_taskfile tf;
-	unsigned int err_mask;
-
-	if (!cmd)
-		return 0;
-
-	DPRINTK("ATA request sense\n");
-	ata_dev_warn(dev, "request sense\n");
-	if (!ata_id_sense_reporting_enabled(dev->id)) {
-		ata_dev_warn(qc->dev, "sense data reporting disabled\n");
-		return 0;
-	}
-	ata_tf_init(dev, &tf);
-
-	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
-	tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
-	tf.command = ATA_CMD_REQ_SENSE_DATA;
-	tf.protocol = ATA_PROT_NODATA;
-
-	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
-	/*
-	 * ACS-4 states:
-	 * The device may set the SENSE DATA AVAILABLE bit to one in the
-	 * STATUS field and clear the ERROR bit to zero in the STATUS field
-	 * to indicate that the command returned completion without an error
-	 * and the sense data described in table 306 is available.
-	 *
-	 * IOW the 'ATA_SENSE' bit might not be set even though valid
-	 * sense data is available.
-	 * So check for both.
-	 */
-	if ((tf.command & ATA_SENSE) ||
-	    tf.lbah != 0 || tf.lbam != 0 || tf.lbal != 0) {
-		ata_scsi_set_sense(cmd, tf.lbah, tf.lbam, tf.lbal);
-		qc->flags |= ATA_QCFLAG_SENSE_VALID;
-		ata_dev_warn(dev, "sense data %02x/%02x/%02x\n",
-			     tf.lbah, tf.lbam, tf.lbal);
-	} else {
-		ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
-			     tf.command, err_mask);
-	}
-	return err_mask;
-}
-
 /**
  *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
  *	@dev: device to perform REQUEST_SENSE to

@@ -1855,19 +1789,6 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
 	memcpy(&qc->result_tf, &tf, sizeof(tf));
 	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
 	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
-	if (qc->result_tf.auxiliary) {
-		char sense_key, asc, ascq;
-
-		sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
-		asc = (qc->result_tf.auxiliary >> 8) & 0xff;
-		ascq = qc->result_tf.auxiliary & 0xff;
-		ata_dev_dbg(dev, "NCQ Autosense %02x/%02x/%02x\n",
-			    sense_key, asc, ascq);
-		ata_scsi_set_sense(qc->scsicmd, sense_key, asc, ascq);
-		ata_scsi_set_sense_information(qc->scsicmd, &qc->result_tf);
-		qc->flags |= ATA_QCFLAG_SENSE_VALID;
-	}
-
 	ehc->i.err_mask &= ~AC_ERR_DEV;
 }
 

@@ -1897,27 +1818,6 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
 		return ATA_EH_RESET;
 	}
 
-	/*
-	 * Sense data reporting does not work if the
-	 * device fault bit is set.
-	 */
-	if ((stat & ATA_SENSE) && !(stat & ATA_DF) &&
-	    !(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
-		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
-			tmp = ata_eh_request_sense(qc, qc->scsicmd);
-			if (tmp)
-				qc->err_mask |= tmp;
-			else
-				ata_scsi_set_sense_information(qc->scsicmd, tf);
-		} else {
-			ata_dev_warn(qc->dev, "sense data available but port frozen\n");
-		}
-	}
-
-	/* Set by NCQ autosense or request sense above */
-	if (qc->flags & ATA_QCFLAG_SENSE_VALID)
-		return 0;
-
 	if (stat & (ATA_ERR | ATA_DF))
 		qc->err_mask |= AC_ERR_DEV;
 	else

@@ -2661,15 +2561,14 @@ static void ata_eh_link_report(struct ata_link *link)
 
 #ifdef CONFIG_ATA_VERBOSE_ERROR
 	if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
-			    ATA_SENSE | ATA_ERR)) {
+			    ATA_ERR)) {
 		if (res->command & ATA_BUSY)
 			ata_dev_err(qc->dev, "status: { Busy }\n");
 		else
-			ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
+			ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
 				res->command & ATA_DRDY ? "DRDY " : "",
 				res->command & ATA_DF ? "DF " : "",
 				res->command & ATA_DRQ ? "DRQ " : "",
-				res->command & ATA_SENSE ? "SENSE " : "",
 				res->command & ATA_ERR ? "ERR " : "");
 	}
 
@@ -270,28 +270,13 @@ DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
 	    ata_scsi_park_show, ata_scsi_park_store);
 EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
 
-void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
+static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
 {
-	if (!cmd)
-		return;
-
 	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 
 	scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
 }
 
-void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
-				    const struct ata_taskfile *tf)
-{
-	u64 information;
-
-	if (!cmd)
-		return;
-
-	information = ata_tf_read_block(tf, NULL);
-	scsi_set_sense_information(cmd->sense_buffer, information);
-}
-
 static ssize_t
 ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
 			  const char *buf, size_t count)

@@ -1792,9 +1777,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
 		    ((cdb[2] & 0x20) || need_sense)) {
 			ata_gen_passthru_sense(qc);
 		} else {
-			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
-				cmd->result = SAM_STAT_CHECK_CONDITION;
-			} else if (!need_sense) {
+			if (!need_sense) {
 				cmd->result = SAM_STAT_GOOD;
 			} else {
 				/* TODO: decide which descriptor format to use
@@ -67,8 +67,7 @@ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag);
 extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
 			   u64 block, u32 n_block, unsigned int tf_flags,
 			   unsigned int tag);
-extern u64 ata_tf_read_block(const struct ata_taskfile *tf,
-			     struct ata_device *dev);
+extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
 extern unsigned ata_exec_internal(struct ata_device *dev,
 				  struct ata_taskfile *tf, const u8 *cdb,
 				  int dma_dir, void *buf, unsigned int buflen,

@@ -138,9 +137,6 @@ extern int ata_scsi_add_hosts(struct ata_host *host,
 			      struct scsi_host_template *sht);
 extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
 extern int ata_scsi_offline_dev(struct ata_device *dev);
-extern void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
-extern void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
-					   const struct ata_taskfile *tf);
 extern void ata_scsi_media_change_notify(struct ata_device *dev);
 extern void ata_scsi_hotplug(struct work_struct *work);
 extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
@@ -1238,8 +1238,12 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
 	readl(mmio + PDC_SDRAM_CONTROL);
 
 	/* Turn on for ECC */
-	pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
-			  PDC_DIMM_SPD_TYPE, &spd0);
+	if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+			       PDC_DIMM_SPD_TYPE, &spd0)) {
+		pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
+		       PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
+		return 1;
+	}
 	if (spd0 == 0x02) {
 		data |= (0x01 << 16);
 		writel(data, mmio + PDC_SDRAM_CONTROL);

@@ -1380,8 +1384,12 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
 
 	/* ECC initiliazation. */
 
-	pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
-			  PDC_DIMM_SPD_TYPE, &spd0);
+	if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+			       PDC_DIMM_SPD_TYPE, &spd0)) {
+		pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
+		       PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
+		return 1;
+	}
 	if (spd0 == 0x02) {
 		void *buf;
 		VPRINTK("Start ECC initialization\n");
@@ -496,10 +496,9 @@ static void zram_meta_free(struct zram_meta *meta, u64 disksize)
 	kfree(meta);
 }
 
-static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
+static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
 {
 	size_t num_pages;
-	char pool_name[8];
 	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
 
 	if (!meta)

@@ -512,7 +511,6 @@ static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
 		goto out_error;
 	}
 
-	snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
 	meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
 	if (!meta->mem_pool) {
 		pr_err("Error creating memory pool\n");

@@ -1031,7 +1029,7 @@ static ssize_t disksize_store(struct device *dev,
 		return -EINVAL;
 
 	disksize = PAGE_ALIGN(disksize);
-	meta = zram_meta_alloc(zram->disk->first_minor, disksize);
+	meta = zram_meta_alloc(zram->disk->disk_name, disksize);
 	if (!meta)
 		return -ENOMEM;
 
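The zram hunks above stop formatting the pool name into a local char pool_name[8] and instead pass the gendisk's disk_name through. The old buffer holds "zram" plus at most three digits and a terminating NUL, so snprintf() silently truncates the name once device IDs reach four digits, and distinct devices can end up with colliding pool names. A standalone illustration of the arithmetic (this reasoning is inferred from the diff, not stated in it):

	#include <stdio.h>

	int main(void)
	{
		char pool_name[8];

		/* "zram" (4 chars) + 3 digits + NUL exactly fills 8 bytes. */
		snprintf(pool_name, sizeof(pool_name), "zram%d", 999);
		printf("%s\n", pool_name);	/* prints "zram999" */

		/* Four digits no longer fit: truncated to "zram100". */
		snprintf(pool_name, sizeof(pool_name), "zram%d", 1000);
		printf("%s\n", pool_name);	/* prints "zram100" */

		return 0;
	}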
@@ -126,7 +126,7 @@ PARENTS(pxa3xx_ac97_bus) = { "ring_osc_60mhz", "ac97" };
 PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" };
 PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" };
 
-#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENA : &CKENB)
+#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENB : &CKENA)
 #define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp,	\
 		    div_hp, bit, is_lp, flags)				\
 	PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp,	\
@@ -661,6 +661,9 @@ static void sh_cmt_clocksource_suspend(struct clocksource *cs)
 {
 	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
 
+	if (!ch->cs_enabled)
+		return;
+
 	sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
 	pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
 }

@@ -669,6 +672,9 @@ static void sh_cmt_clocksource_resume(struct clocksource *cs)
 {
 	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
 
+	if (!ch->cs_enabled)
+		return;
+
 	pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
 	sh_cmt_start(ch, FLAG_CLOCKSOURCE);
 }
@@ -180,7 +180,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
 		ret = exynos5250_cpufreq_init(exynos_info);
 	} else {
 		pr_err("%s: Unknown SoC type\n", __func__);
-		return -ENODEV;
+		ret = -ENODEV;
 	}
 
 	if (ret)

@@ -188,12 +188,14 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
 
 	if (exynos_info->set_freq == NULL) {
 		dev_err(&pdev->dev, "No set_freq function (ERR)\n");
+		ret = -EINVAL;
 		goto err_vdd_arm;
 	}
 
 	arm_regulator = regulator_get(NULL, "vdd_arm");
 	if (IS_ERR(arm_regulator)) {
 		dev_err(&pdev->dev, "failed to get resource vdd_arm\n");
+		ret = -EINVAL;
 		goto err_vdd_arm;
 	}
 

@@ -225,7 +227,7 @@ err_cpufreq_reg:
 	regulator_put(arm_regulator);
 err_vdd_arm:
 	kfree(exynos_info);
-	return -EINVAL;
+	return ret;
 }
 
 static struct platform_driver exynos_cpufreq_platdrv = {
@@ -909,13 +909,14 @@ static int ahash_final_ctx(struct ahash_request *req)
 		      state->buflen_1;
 	u32 *sh_desc = ctx->sh_desc_fin, *desc;
 	dma_addr_t ptr = ctx->sh_desc_fin_dma;
-	int sec4_sg_bytes;
+	int sec4_sg_bytes, sec4_sg_src_index;
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct ahash_edesc *edesc;
 	int ret = 0;
 	int sh_len;
 
-	sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
+	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
+	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
 	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +

@@ -942,7 +943,7 @@ static int ahash_final_ctx(struct ahash_request *req)
 	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
 						buf, state->buf_dma, buflen,
 						last_buflen);
-	(edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
+	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
 
 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
 					    sec4_sg_bytes, DMA_TO_DEVICE);
@@ -71,7 +71,6 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 	struct sha256_state *sctx = shash_desc_ctx(desc);
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-	struct nx_sg *in_sg;
 	struct nx_sg *out_sg;
 	u64 to_process = 0, leftover, total;
 	unsigned long irq_flags;
@@ -97,7 +96,6 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
 	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
-	in_sg = nx_ctx->in_sg;
 	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
 			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
 	max_sg_len = min_t(u64, max_sg_len,
@@ -114,17 +112,12 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 	}
 
 	do {
-		/*
-		 * to_process: the SHA256_BLOCK_SIZE data chunk to process in
-		 * this update. This value is also restricted by the sg list
-		 * limits.
-		 */
-		to_process = total - to_process;
-		to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+		int used_sgs = 0;
+		struct nx_sg *in_sg = nx_ctx->in_sg;
 
 		if (buf_len) {
 			data_len = buf_len;
-			in_sg = nx_build_sg_list(nx_ctx->in_sg,
+			in_sg = nx_build_sg_list(in_sg,
 						 (u8 *) sctx->buf,
 						 &data_len,
 						 max_sg_len);
@@ -133,15 +126,27 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 				rc = -EINVAL;
 				goto out;
 			}
+			used_sgs = in_sg - nx_ctx->in_sg;
 		}
 
+		/* to_process: SHA256_BLOCK_SIZE aligned chunk to be
+		 * processed in this iteration. This value is restricted
+		 * by sg list limits and number of sgs we already used
+		 * for leftover data. (see above)
+		 * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
+		 * but because data may not be aligned, we need to account
+		 * for that too. */
+		to_process = min_t(u64, total,
+			(max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+		to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+
 		data_len = to_process - buf_len;
 		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
 					 &data_len, max_sg_len);
 
 		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
 
-		to_process = (data_len + buf_len);
+		to_process = data_len + buf_len;
 		leftover = total - to_process;
 
 		/*
@@ -71,7 +71,6 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 	struct sha512_state *sctx = shash_desc_ctx(desc);
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-	struct nx_sg *in_sg;
 	struct nx_sg *out_sg;
 	u64 to_process, leftover = 0, total;
 	unsigned long irq_flags;
@@ -97,7 +96,6 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
 	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
-	in_sg = nx_ctx->in_sg;
 	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
 			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
 	max_sg_len = min_t(u64, max_sg_len,
@@ -114,18 +112,12 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 	}
 
 	do {
-		/*
-		 * to_process: the SHA512_BLOCK_SIZE data chunk to process in
-		 * this update. This value is also restricted by the sg list
-		 * limits.
-		 */
-		to_process = total - leftover;
-		to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
-		leftover = total - to_process;
+		int used_sgs = 0;
+		struct nx_sg *in_sg = nx_ctx->in_sg;
 
 		if (buf_len) {
 			data_len = buf_len;
-			in_sg = nx_build_sg_list(nx_ctx->in_sg,
+			in_sg = nx_build_sg_list(in_sg,
 						 (u8 *) sctx->buf,
 						 &data_len, max_sg_len);
 
@@ -133,8 +125,20 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 				rc = -EINVAL;
 				goto out;
 			}
+			used_sgs = in_sg - nx_ctx->in_sg;
 		}
 
+		/* to_process: SHA512_BLOCK_SIZE aligned chunk to be
+		 * processed in this iteration. This value is restricted
+		 * by sg list limits and number of sgs we already used
+		 * for leftover data. (see above)
+		 * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
+		 * but because data may not be aligned, we need to account
+		 * for that too. */
+		to_process = min_t(u64, total,
+			(max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+		to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
+
 		data_len = to_process - buf_len;
 		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
 					 &data_len, max_sg_len);
@@ -146,7 +150,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 			goto out;
 		}
 
-		to_process = (data_len + buf_len);
+		to_process = data_len + buf_len;
 		leftover = total - to_process;
 
 		/*
@@ -689,6 +689,10 @@ struct dma_chan *dma_request_slave_channel(struct device *dev,
 	struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
 	if (IS_ERR(ch))
 		return NULL;
+
+	dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
+	ch->device->privatecnt++;
+
 	return ch;
 }
 EXPORT_SYMBOL_GPL(dma_request_slave_channel);
@@ -873,9 +873,10 @@ static void drm_dp_destroy_port(struct kref *kref)
 	   from an EDID retrieval */
 	if (port->connector) {
 		mutex_lock(&mgr->destroy_connector_lock);
-		list_add(&port->connector->destroy_list, &mgr->destroy_connector_list);
+		list_add(&port->next, &mgr->destroy_connector_list);
 		mutex_unlock(&mgr->destroy_connector_lock);
 		schedule_work(&mgr->destroy_connector_work);
+		return;
 	}
 	drm_dp_port_teardown_pdt(port, port->pdt);
 
@@ -2659,7 +2660,7 @@ static void drm_dp_tx_work(struct work_struct *work)
 static void drm_dp_destroy_connector_work(struct work_struct *work)
 {
 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
-	struct drm_connector *connector;
+	struct drm_dp_mst_port *port;
 
 	/*
 	 * Not a regular list traverse as we have to drop the destroy
@@ -2668,15 +2669,21 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
 	 */
 	for (;;) {
 		mutex_lock(&mgr->destroy_connector_lock);
-		connector = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_connector, destroy_list);
-		if (!connector) {
+		port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
+		if (!port) {
 			mutex_unlock(&mgr->destroy_connector_lock);
 			break;
 		}
-		list_del(&connector->destroy_list);
+		list_del(&port->next);
 		mutex_unlock(&mgr->destroy_connector_lock);
 
-		mgr->cbs->destroy_connector(mgr, connector);
+		mgr->cbs->destroy_connector(mgr, port->connector);
+
+		drm_dp_port_teardown_pdt(port, port->pdt);
+
+		if (!port->input && port->vcpi.vcpi > 0)
+			drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+		kfree(port);
 	}
 }
@@ -1745,7 +1745,6 @@ static int fimc_probe(struct platform_device *pdev)
 	spin_lock_init(&ctx->lock);
 	platform_set_drvdata(pdev, ctx);
 
-	pm_runtime_set_active(dev);
 	pm_runtime_enable(dev);
 
 	ret = exynos_drm_ippdrv_register(ippdrv);
@@ -593,8 +593,7 @@ static int gsc_src_set_transf(struct device *dev,
 
 	gsc_write(cfg, GSC_IN_CON);
 
-	ctx->rotation = cfg &
-		(GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+	ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
 	*swap = ctx->rotation;
 
 	return 0;
@@ -857,8 +856,7 @@ static int gsc_dst_set_transf(struct device *dev,
 
 	gsc_write(cfg, GSC_IN_CON);
 
-	ctx->rotation = cfg &
-		(GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+	ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
 	*swap = ctx->rotation;
 
 	return 0;
@@ -1064,6 +1064,7 @@ static int hdmi_get_modes(struct drm_connector *connector)
 {
 	struct hdmi_context *hdata = ctx_from_connector(connector);
 	struct edid *edid;
+	int ret;
 
 	if (!hdata->ddc_adpt)
 		return -ENODEV;
@@ -1079,7 +1080,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
 
 	drm_mode_connector_update_edid_property(connector, edid);
 
-	return drm_add_edid_modes(connector, edid);
+	ret = drm_add_edid_modes(connector, edid);
+
+	kfree(edid);
+
+	return ret;
 }
 
 static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
@@ -718,6 +718,10 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
 
 	/* handling VSYNC */
 	if (val & MXR_INT_STATUS_VSYNC) {
+		/* vsync interrupt use different bit for read and clear */
+		val |= MXR_INT_CLEAR_VSYNC;
+		val &= ~MXR_INT_STATUS_VSYNC;
+
 		/* interlace scan need to check shadow register */
 		if (ctx->interlace) {
 			base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
@@ -743,11 +747,6 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
 
 out:
 	/* clear interrupts */
-	if (~val & MXR_INT_EN_VSYNC) {
-		/* vsync interrupt use different bit for read and clear */
-		val &= ~MXR_INT_EN_VSYNC;
-		val |= MXR_INT_CLEAR_VSYNC;
-	}
 	mixer_reg_write(res, MXR_INT_STATUS, val);
 
 	spin_unlock(&res->reg_slock);
@@ -907,8 +906,8 @@ static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
 	}
 
 	/* enable vsync interrupt */
-	mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC,
-			MXR_INT_EN_VSYNC);
+	mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
+	mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
 
 	return 0;
 }
@@ -918,7 +917,13 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
 	struct mixer_context *mixer_ctx = crtc->ctx;
 	struct mixer_resources *res = &mixer_ctx->mixer_res;
 
+	if (!mixer_ctx->powered) {
+		mixer_ctx->int_en &= MXR_INT_EN_VSYNC;
+		return;
+	}
+
 	/* disable vsync interrupt */
+	mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
 	mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
 }
 
@@ -1047,6 +1052,8 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
 
 	mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
 
+	if (ctx->int_en & MXR_INT_EN_VSYNC)
+		mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
 	mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
 	mixer_win_reset(ctx);
 }
@@ -129,8 +129,9 @@ int intel_atomic_commit(struct drm_device *dev,
 			struct drm_atomic_state *state,
 			bool async)
 {
-	int ret;
-	int i;
+	struct drm_crtc_state *crtc_state;
+	struct drm_crtc *crtc;
+	int ret, i;
 
 	if (async) {
 		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
@@ -142,48 +143,18 @@ int intel_atomic_commit(struct drm_device *dev,
 		return ret;
 
 	/* Point of no return */
-
-	/*
-	 * FIXME:  The proper sequence here will eventually be:
-	 *
-	 * drm_atomic_helper_swap_state(dev, state)
-	 * drm_atomic_helper_commit_modeset_disables(dev, state);
-	 * drm_atomic_helper_commit_planes(dev, state);
-	 * drm_atomic_helper_commit_modeset_enables(dev, state);
-	 * drm_atomic_helper_wait_for_vblanks(dev, state);
-	 * drm_atomic_helper_cleanup_planes(dev, state);
-	 * drm_atomic_state_free(state);
-	 *
-	 * once we have full atomic modeset.  For now, just manually update
-	 * plane states to avoid clobbering good states with dummy states
-	 * while nuclear pageflipping.
-	 */
-	for (i = 0; i < dev->mode_config.num_total_plane; i++) {
-		struct drm_plane *plane = state->planes[i];
-
-		if (!plane)
-			continue;
-
-		plane->state->state = state;
-		swap(state->plane_states[i], plane->state);
-		plane->state->state = NULL;
-	}
+	drm_atomic_helper_swap_state(dev, state);
 
 	/* swap crtc_scaler_state */
-	for (i = 0; i < dev->mode_config.num_crtc; i++) {
-		struct drm_crtc *crtc = state->crtcs[i];
-		if (!crtc) {
-			continue;
-		}
-
-		to_intel_crtc(crtc)->config->scaler_state =
-			to_intel_crtc_state(state->crtc_states[i])->scaler_state;
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
 
 		if (INTEL_INFO(dev)->gen >= 9)
 			skl_detach_scalers(to_intel_crtc(crtc));
+
+		drm_atomic_helper_commit_planes_on_crtc(crtc_state);
 	}
 
-	drm_atomic_helper_commit_planes(dev, state);
 	drm_atomic_helper_wait_for_vblanks(dev, state);
 	drm_atomic_helper_cleanup_planes(dev, state);
 	drm_atomic_state_free(state);
@@ -11826,7 +11826,9 @@ encoder_retry:
 		goto encoder_retry;
 	}
 
-	pipe_config->dither = pipe_config->pipe_bpp != base_bpp;
+	/* Dithering seems to not pass-through bits correctly when it should, so
+	 * only enable it on 6bpc panels. */
+	pipe_config->dither = pipe_config->pipe_bpp == 6*3;
 	DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
 		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
 
@@ -12624,17 +12626,17 @@ static int __intel_set_mode(struct drm_crtc *modeset_crtc,
 
 	modeset_update_crtc_power_domains(state);
 
-	drm_atomic_helper_commit_planes(dev, state);
-
 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		if (!needs_modeset(crtc->state) || !crtc->state->enable)
+		if (!needs_modeset(crtc->state) || !crtc->state->enable) {
+			drm_atomic_helper_commit_planes_on_crtc(crtc_state);
 			continue;
+		}
 
 		update_scanline_offset(to_intel_crtc(crtc));
 
 		dev_priv->display.crtc_enable(crtc);
-		intel_crtc_enable_planes(crtc);
+		drm_atomic_helper_commit_planes_on_crtc(crtc_state);
 	}
 
 	/* FIXME: add subpixel order */
@@ -12891,20 +12893,11 @@ intel_modeset_stage_output_state(struct drm_device *dev,
 	return 0;
 }
 
-static bool primary_plane_visible(struct drm_crtc *crtc)
-{
-	struct intel_plane_state *plane_state =
-		to_intel_plane_state(crtc->primary->state);
-
-	return plane_state->visible;
-}
-
 static int intel_crtc_set_config(struct drm_mode_set *set)
 {
 	struct drm_device *dev;
 	struct drm_atomic_state *state = NULL;
 	struct intel_crtc_state *pipe_config;
-	bool primary_plane_was_visible;
 	int ret;
 
 	BUG_ON(!set);
@@ -12943,38 +12936,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
 
 	intel_update_pipe_size(to_intel_crtc(set->crtc));
 
-	primary_plane_was_visible = primary_plane_visible(set->crtc);
-
 	ret = intel_set_mode_with_config(set->crtc, pipe_config, true);
 
-	if (ret == 0 &&
-	    pipe_config->base.enable &&
-	    pipe_config->base.planes_changed &&
-	    !needs_modeset(&pipe_config->base)) {
-		struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
-
-		/*
-		 * We need to make sure the primary plane is re-enabled if it
-		 * has previously been turned off.
-		 */
-		if (ret == 0 && !primary_plane_was_visible &&
-		    primary_plane_visible(set->crtc)) {
-			WARN_ON(!intel_crtc->active);
-			intel_post_enable_primary(set->crtc);
-		}
-
-		/*
-		 * In the fastboot case this may be our only check of the
-		 * state after boot.  It would be better to only do it on
-		 * the first update, but we don't have a nice way of doing that
-		 * (and really, set_config isn't used much for high freq page
-		 * flipping, so increasing its cost here shouldn't be a big
-		 * deal).
-		 */
-		if (i915.fastboot && ret == 0)
-			intel_modeset_check_state(set->crtc->dev);
-	}
-
 	if (ret) {
 		DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
 			      set->crtc->base.id, ret);
@@ -13305,6 +13268,9 @@ intel_check_primary_plane(struct drm_plane *plane,
 		 */
 		if (IS_BROADWELL(dev))
 			intel_crtc->atomic.wait_vblank = true;
+
+		if (crtc_state)
+			intel_crtc->atomic.post_enable_primary = true;
 	}
 
 	/*
@@ -13317,6 +13283,10 @@ intel_check_primary_plane(struct drm_plane *plane,
 	if (!state->visible || !fb)
 		intel_crtc->atomic.disable_ips = true;
 
+	if (!state->visible && old_state->visible &&
+	    crtc_state && !needs_modeset(&crtc_state->base))
+		intel_crtc->atomic.pre_disable_primary = true;
+
 	intel_crtc->atomic.fb_bits |=
 		INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
 
@@ -15034,6 +15004,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 		struct intel_plane_state *plane_state;
 
 		memset(crtc->config, 0, sizeof(*crtc->config));
+		crtc->config->base.crtc = &crtc->base;
 
 		crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
 
@@ -165,31 +165,15 @@ gk104_fifo_context_attach(struct nvkm_object *parent,
 	return 0;
 }
 
-static int
-gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
-{
-	struct nvkm_object *obj = (void *)chan;
-	struct gk104_fifo_priv *priv = (void *)obj->engine;
-
-	nv_wr32(priv, 0x002634, chan->base.chid);
-	if (!nv_wait(priv, 0x002634, 0x100000, 0x000000)) {
-		nv_error(priv, "channel %d [%s] kick timeout\n",
-			 chan->base.chid, nvkm_client_name(chan));
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
 static int
 gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
 			  struct nvkm_object *object)
 {
 	struct nvkm_bar *bar = nvkm_bar(parent);
+	struct gk104_fifo_priv *priv = (void *)parent->engine;
 	struct gk104_fifo_base *base = (void *)parent->parent;
 	struct gk104_fifo_chan *chan = (void *)parent;
 	u32 addr;
-	int ret;
 
 	switch (nv_engidx(object->engine)) {
 	case NVDEV_ENGINE_SW    : return 0;
@@ -204,9 +188,13 @@ gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
 		return -EINVAL;
 	}
 
-	ret = gk104_fifo_chan_kick(chan);
-	if (ret && suspend)
-		return ret;
+	nv_wr32(priv, 0x002634, chan->base.chid);
+	if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
+		nv_error(priv, "channel %d [%s] kick timeout\n",
+			 chan->base.chid, nvkm_client_name(chan));
+		if (suspend)
+			return -EBUSY;
+	}
 
 	if (addr) {
 		nv_wo32(base, addr + 0x00, 0x00000000);
@@ -331,7 +319,6 @@ gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
 		gk104_fifo_runlist_update(priv, chan->engine);
 	}
 
-	gk104_fifo_chan_kick(chan);
 	nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
 	return nvkm_fifo_channel_fini(&chan->base, suspend);
 }
@@ -2492,7 +2492,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
 				     true, NULL);
 	if (unlikely(ret != 0))
-		goto out_err;
+		goto out_err_nores;
 
 	ret = vmw_validate_buffers(dev_priv, sw_context);
 	if (unlikely(ret != 0))
@@ -2536,6 +2536,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	vmw_resource_relocations_free(&sw_context->res_relocations);
 
 	vmw_fifo_commit(dev_priv, command_size);
+	mutex_unlock(&dev_priv->binding_mutex);
 
 	vmw_query_bo_switch_commit(dev_priv, sw_context);
 	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2551,7 +2552,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 		DRM_ERROR("Fence submission error. Syncing.\n");
 
 	vmw_resource_list_unreserve(&sw_context->resource_list, false);
-	mutex_unlock(&dev_priv->binding_mutex);
 
 	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
 				    (void *) fence);
@@ -814,7 +814,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
 			printk(KERN_ERR MOD
 			       "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
 			       CQE_STATUS(&cqe), CQE_QPID(&cqe));
-			ret = -EINVAL;
+			wc->status = IB_WC_FATAL_ERR;
 		}
 	}
 out:
@@ -2245,6 +2245,9 @@ void omap3_gpmc_save_context(void)
 {
 	int i;
 
+	if (!gpmc_base)
+		return;
+
 	gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG);
 	gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE);
 	gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL);
@@ -2277,6 +2280,9 @@ void omap3_gpmc_restore_context(void)
 {
 	int i;
 
+	if (!gpmc_base)
+		return;
+
 	gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig);
 	gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable);
 	gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl);
@@ -5173,7 +5173,7 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
 	struct device *dev = &adapter->pdev->dev;
 	int status;
 
-	if (lancer_chip(adapter) || BEx_chip(adapter))
+	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
 		return;
 
 	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
@@ -5220,7 +5220,7 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 
-	if (lancer_chip(adapter) || BEx_chip(adapter))
+	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
 		return;
 
 	if (adapter->vxlan_port != port)
@@ -2067,6 +2067,11 @@ int startup_gfar(struct net_device *ndev)
 	/* Start Rx/Tx DMA and enable the interrupts */
 	gfar_start(priv);
 
+	/* force link state update after mac reset */
+	priv->oldlink = 0;
+	priv->oldspeed = 0;
+	priv->oldduplex = -1;
+
 	phy_start(priv->phydev);
 
 	enable_napi(priv);
@@ -952,9 +952,8 @@ static int ks8842_alloc_dma_bufs(struct net_device *netdev)
 
 	sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
 		tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
-	err = dma_mapping_error(adapter->dev,
-		sg_dma_address(&tx_ctl->sg));
-	if (err) {
+	if (dma_mapping_error(adapter->dev, sg_dma_address(&tx_ctl->sg))) {
+		err = -ENOMEM;
 		sg_dma_address(&tx_ctl->sg) = 0;
 		goto err;
 	}
@@ -814,6 +814,7 @@ void phy_state_machine(struct work_struct *work)
 	bool needs_aneg = false, do_suspend = false;
 	enum phy_state old_state;
 	int err = 0;
+	int old_link;
 
 	mutex_lock(&phydev->lock);
 
@@ -899,11 +900,18 @@ void phy_state_machine(struct work_struct *work)
 		phydev->adjust_link(phydev->attached_dev);
 		break;
 	case PHY_RUNNING:
-		/* Only register a CHANGE if we are
-		 * polling or ignoring interrupts
+		/* Only register a CHANGE if we are polling or ignoring
+		 * interrupts and link changed since latest checking.
 		 */
-		if (!phy_interrupt_is_valid(phydev))
-			phydev->state = PHY_CHANGELINK;
+		if (!phy_interrupt_is_valid(phydev)) {
+			old_link = phydev->link;
+			err = phy_read_status(phydev);
+			if (err)
+				break;
+
+			if (old_link != phydev->link)
+				phydev->state = PHY_CHANGELINK;
+		}
 		break;
 	case PHY_CHANGELINK:
 		err = phy_read_status(phydev);
@@ -91,19 +91,18 @@ static int lan911x_config_init(struct phy_device *phydev)
 }
 
 /*
- * The LAN8710/LAN8720 requires a minimum of 2 link pulses within 64ms of each
- * other in order to set the ENERGYON bit and exit EDPD mode.  If a link partner
- * does send the pulses within this interval, the PHY will remained powered
- * down.
- *
- * This workaround will manually toggle the PHY on/off upon calls to read_status
- * in order to generate link test pulses if the link is down.  If a link partner
- * is present, it will respond to the pulses, which will cause the ENERGYON bit
- * to be set and will cause the EDPD mode to be exited.
+ * The LAN87xx suffers from rare absence of the ENERGYON-bit when Ethernet cable
+ * plugs in while LAN87xx is in Energy Detect Power-Down mode. This leads to
+ * unstable detection of plugging in Ethernet cable.
+ * This workaround disables Energy Detect Power-Down mode and waiting for
+ * response on link pulses to detect presence of plugged Ethernet cable.
+ * The Energy Detect Power-Down mode is enabled again in the end of procedure to
+ * save approximately 220 mW of power if cable is unplugged.
 */
 static int lan87xx_read_status(struct phy_device *phydev)
 {
 	int err = genphy_read_status(phydev);
+	int i;
 
 	if (!phydev->link) {
 		/* Disable EDPD to wake up PHY */
@@ -116,8 +115,16 @@ static int lan87xx_read_status(struct phy_device *phydev)
 		if (rc < 0)
 			return rc;
 
-		/* Sleep 64 ms to allow ~5 link test pulses to be sent */
-		msleep(64);
+		/* Wait max 640 ms to detect energy */
+		for (i = 0; i < 64; i++) {
+			/* Sleep to allow link test pulses to be sent */
+			msleep(10);
+			rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+			if (rc < 0)
+				return rc;
+			if (rc & MII_LAN83C185_ENERGYON)
+				break;
+		}
 
 		/* Re-enable EDPD */
 		rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
@@ -191,7 +198,7 @@ static struct phy_driver smsc_phy_driver[] = {
 
 	/* basic functions */
 	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
+	.read_status	= lan87xx_read_status,
 	.config_init	= smsc_phy_config_init,
 	.soft_reset	= smsc_phy_reset,
@@ -269,9 +269,9 @@ static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
 static void ppp_ccp_closed(struct ppp *ppp);
 static struct compressor *find_compressor(int type);
 static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
-static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp);
+static struct ppp *ppp_create_interface(struct net *net, int unit,
+					struct file *file, int *retp);
 static void init_ppp_file(struct ppp_file *pf, int kind);
-static void ppp_shutdown_interface(struct ppp *ppp);
 static void ppp_destroy_interface(struct ppp *ppp);
 static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
 static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
@@ -392,8 +392,10 @@ static int ppp_release(struct inode *unused, struct file *file)
 		file->private_data = NULL;
 		if (pf->kind == INTERFACE) {
 			ppp = PF_TO_PPP(pf);
+			rtnl_lock();
 			if (file == ppp->owner)
-				ppp_shutdown_interface(ppp);
+				unregister_netdevice(ppp->dev);
+			rtnl_unlock();
 		}
 		if (atomic_dec_and_test(&pf->refcnt)) {
 			switch (pf->kind) {
@@ -593,8 +595,10 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		mutex_lock(&ppp_mutex);
 		if (pf->kind == INTERFACE) {
 			ppp = PF_TO_PPP(pf);
+			rtnl_lock();
 			if (file == ppp->owner)
-				ppp_shutdown_interface(ppp);
+				unregister_netdevice(ppp->dev);
+			rtnl_unlock();
 		}
 		if (atomic_long_read(&file->f_count) < 2) {
 			ppp_release(NULL, file);
@@ -838,11 +842,10 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
 		/* Create a new ppp unit */
 		if (get_user(unit, p))
 			break;
-		ppp = ppp_create_interface(net, unit, &err);
+		ppp = ppp_create_interface(net, unit, file, &err);
 		if (!ppp)
 			break;
 		file->private_data = &ppp->file;
-		ppp->owner = file;
 		err = -EFAULT;
 		if (put_user(ppp->file.index, p))
 			break;
@@ -916,6 +919,16 @@ static __net_init int ppp_init_net(struct net *net)
 static __net_exit void ppp_exit_net(struct net *net)
 {
 	struct ppp_net *pn = net_generic(net, ppp_net_id);
+	struct ppp *ppp;
+	LIST_HEAD(list);
+	int id;
+
+	rtnl_lock();
+	idr_for_each_entry(&pn->units_idr, ppp, id)
+		unregister_netdevice_queue(ppp->dev, &list);
+
+	unregister_netdevice_many(&list);
+	rtnl_unlock();
 
 	idr_destroy(&pn->units_idr);
 }
@@ -1088,8 +1101,28 @@ static int ppp_dev_init(struct net_device *dev)
 	return 0;
 }
 
+static void ppp_dev_uninit(struct net_device *dev)
+{
+	struct ppp *ppp = netdev_priv(dev);
+	struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
+
+	ppp_lock(ppp);
+	ppp->closing = 1;
+	ppp_unlock(ppp);
+
+	mutex_lock(&pn->all_ppp_mutex);
+	unit_put(&pn->units_idr, ppp->file.index);
+	mutex_unlock(&pn->all_ppp_mutex);
+
+	ppp->owner = NULL;
+
+	ppp->file.dead = 1;
+	wake_up_interruptible(&ppp->file.rwait);
+}
+
 static const struct net_device_ops ppp_netdev_ops = {
 	.ndo_init	 = ppp_dev_init,
+	.ndo_uninit      = ppp_dev_uninit,
 	.ndo_start_xmit  = ppp_start_xmit,
 	.ndo_do_ioctl    = ppp_net_ioctl,
 	.ndo_get_stats64 = ppp_get_stats64,
@@ -2667,8 +2700,8 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
  * or if there is already a unit with the requested number.
  * unit == -1 means allocate a new number.
  */
-static struct ppp *
-ppp_create_interface(struct net *net, int unit, int *retp)
+static struct ppp *ppp_create_interface(struct net *net, int unit,
+					struct file *file, int *retp)
 {
 	struct ppp *ppp;
 	struct ppp_net *pn;
@@ -2688,6 +2721,7 @@ ppp_create_interface(struct net *net, int unit, int *retp)
 	ppp->mru = PPP_MRU;
 	init_ppp_file(&ppp->file, INTERFACE);
 	ppp->file.hdrlen = PPP_HDRLEN - 2;	/* don't count proto bytes */
+	ppp->owner = file;
 	for (i = 0; i < NUM_NP; ++i)
 		ppp->npmode[i] = NPMODE_PASS;
 	INIT_LIST_HEAD(&ppp->channels);
@@ -2775,34 +2809,6 @@ init_ppp_file(struct ppp_file *pf, int kind)
 	init_waitqueue_head(&pf->rwait);
 }
 
-/*
- * Take down a ppp interface unit - called when the owning file
- * (the one that created the unit) is closed or detached.
- */
-static void ppp_shutdown_interface(struct ppp *ppp)
-{
-	struct ppp_net *pn;
-
-	pn = ppp_pernet(ppp->ppp_net);
-	mutex_lock(&pn->all_ppp_mutex);
-
-	/* This will call dev_close() for us. */
-	ppp_lock(ppp);
-	if (!ppp->closing) {
-		ppp->closing = 1;
-		ppp_unlock(ppp);
-		unregister_netdev(ppp->dev);
-		unit_put(&pn->units_idr, ppp->file.index);
-	} else
-		ppp_unlock(ppp);
-
-	ppp->file.dead = 1;
-	ppp->owner = NULL;
-	wake_up_interruptible(&ppp->file.rwait);
-
-	mutex_unlock(&pn->all_ppp_mutex);
-}
-
 /*
  * Free the memory used by a ppp unit.  This is only called once
  * there are no channels connected to the unit and no file structs
@@ -786,6 +786,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x413c, 0x81a8, 8)},	/* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
 	{QMI_FIXED_INTF(0x413c, 0x81a9, 8)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
 	{QMI_FIXED_INTF(0x413c, 0x81b1, 8)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
+	{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},	/* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
 	{QMI_FIXED_INTF(0x03f0, 0x581d, 4)},	/* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
 
 	/* 4. Gobi 1000 devices */
@@ -733,8 +733,6 @@ static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
 	if (resp) {
 		resp(sp, fp, arg);
 		res = true;
-	} else if (!IS_ERR(fp)) {
-		fc_frame_free(fp);
 	}
 
 	spin_lock_bh(&ep->ex_lock);
@@ -1596,7 +1594,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
 	 * If new exch resp handler is valid then call that
	 * first.
 	 */
-	fc_invoke_resp(ep, sp, fp);
+	if (!fc_invoke_resp(ep, sp, fp))
+		fc_frame_free(fp);
 
 	fc_exch_release(ep);
 	return;
@@ -1695,7 +1694,8 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
 	fc_exch_hold(ep);
 	if (!rc)
 		fc_exch_delete(ep);
-	fc_invoke_resp(ep, sp, fp);
+	if (!fc_invoke_resp(ep, sp, fp))
+		fc_frame_free(fp);
 	if (has_rec)
 		fc_exch_timer_set(ep, ep->r_a_tov);
 	fc_exch_release(ep);
@@ -1039,11 +1039,26 @@ restart:
 		fc_fcp_pkt_hold(fsp);
 		spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
 
-		if (!fc_fcp_lock_pkt(fsp)) {
+		spin_lock_bh(&fsp->scsi_pkt_lock);
+		if (!(fsp->state & FC_SRB_COMPL)) {
+			fsp->state |= FC_SRB_COMPL;
+			/*
+			 * TODO: dropping scsi_pkt_lock and then reacquiring
+			 * again around fc_fcp_cleanup_cmd() is required,
+			 * since fc_fcp_cleanup_cmd() calls into
+			 * fc_seq_set_resp() and that func preempts cpu using
+			 * schedule. May be schedule and related code should be
+			 * removed instead of unlocking here to avoid scheduling
+			 * while atomic bug.
+			 */
+			spin_unlock_bh(&fsp->scsi_pkt_lock);
+
 			fc_fcp_cleanup_cmd(fsp, error);
+
+			spin_lock_bh(&fsp->scsi_pkt_lock);
 			fc_io_compl(fsp);
-			fc_fcp_unlock_pkt(fsp);
 		}
+		spin_unlock_bh(&fsp->scsi_pkt_lock);
 
 		fc_fcp_pkt_release(fsp);
 		spin_lock_irqsave(&si->scsi_queue_lock, flags);
@@ -2941,10 +2941,10 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
 {
 	struct iscsi_conn *conn = cls_conn->dd_data;
 	struct iscsi_session *session = conn->session;
-	unsigned long flags;
 
 	del_timer_sync(&conn->transport_timer);
 
+	mutex_lock(&session->eh_mutex);
 	spin_lock_bh(&session->frwd_lock);
 	conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
 	if (session->leadconn == conn) {
@@ -2956,28 +2956,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
 	}
 	spin_unlock_bh(&session->frwd_lock);
 
-	/*
-	 * Block until all in-progress commands for this connection
-	 * time out or fail.
-	 */
-	for (;;) {
-		spin_lock_irqsave(session->host->host_lock, flags);
-		if (!atomic_read(&session->host->host_busy)) { /* OK for ERL == 0 */
-			spin_unlock_irqrestore(session->host->host_lock, flags);
-			break;
-		}
-		spin_unlock_irqrestore(session->host->host_lock, flags);
-		msleep_interruptible(500);
-		iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
-				  "host_busy %d host_failed %d\n",
-				  atomic_read(&session->host->host_busy),
-				  session->host->host_failed);
-		/*
-		 * force eh_abort() to unblock
-		 */
-		wake_up(&conn->ehwait);
-	}
-
 	/* flush queued up work because we free the connection below */
 	iscsi_suspend_tx(conn);
 
@@ -2994,6 +2972,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
 	if (session->leadconn == conn)
 		session->leadconn = NULL;
 	spin_unlock_bh(&session->frwd_lock);
+	mutex_unlock(&session->eh_mutex);
 
 	iscsi_destroy_conn(cls_conn);
 }
@@ -26,7 +26,6 @@
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/jiffies.h>
-#include <asm/unaligned.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -2523,33 +2522,3 @@ void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq)
 	}
 }
 EXPORT_SYMBOL(scsi_build_sense_buffer);
-
-/**
- * scsi_set_sense_information - set the information field in a
- *		formatted sense data buffer
- * @buf:	Where to build sense data
- * @info:	64-bit information value to be set
- *
- **/
-void scsi_set_sense_information(u8 *buf, u64 info)
-{
-	if ((buf[0] & 0x7f) == 0x72) {
-		u8 *ucp, len;
-
-		len = buf[7];
-		ucp = (char *)scsi_sense_desc_find(buf, len + 8, 0);
-		if (!ucp) {
-			buf[7] = len + 0xa;
-			ucp = buf + 8 + len;
-		}
-		ucp[0] = 0;
-		ucp[1] = 0xa;
-		ucp[2] = 0x80; /* Valid bit */
-		ucp[3] = 0;
-		put_unaligned_be64(info, &ucp[4]);
-	} else if ((buf[0] & 0x7f) == 0x70) {
-		buf[0] |= 0x80;
-		put_unaligned_be64(info, &buf[3]);
-	}
-}
-EXPORT_SYMBOL(scsi_set_sense_information);
@@ -2770,9 +2770,9 @@ static int sd_revalidate_disk(struct gendisk *disk)
 	max_xfer = sdkp->max_xfer_blocks;
 	max_xfer <<= ilog2(sdp->sector_size) - 9;
 
-	max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
-				max_xfer);
-	blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer);
+	sdkp->disk->queue->limits.max_sectors =
+		min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer);
+
 	set_capacity(disk, sdkp->capacity);
 	sd_config_write_same(sdkp);
 	kfree(buffer);
@@ -968,9 +968,9 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 		cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
 
 	conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
-	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+	if (hdr->flags & ISCSI_FLAG_CMD_READ)
 		cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
-	} else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
+	else
 		cmd->targ_xfer_tag = 0xFFFFFFFF;
 	cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
 	cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
@@ -457,8 +457,15 @@ void target_unregister_template(const struct target_core_fabric_ops *fo)
 		if (!strcmp(t->tf_ops->name, fo->name)) {
 			BUG_ON(atomic_read(&t->tf_access_cnt));
 			list_del(&t->tf_list);
+			mutex_unlock(&g_tf_lock);
+			/*
+			 * Wait for any outstanding fabric se_deve_entry->rcu_head
+			 * callbacks to complete post kfree_rcu(), before allowing
+			 * fabric driver unload of TFO->module to proceed.
+			 */
+			rcu_barrier();
 			kfree(t);
-			break;
+			return;
 		}
 	}
 	mutex_unlock(&g_tf_lock);
@@ -84,8 +84,16 @@ void target_backend_unregister(const struct target_backend_ops *ops)
 	list_for_each_entry(tb, &backend_list, list) {
 		if (tb->ops == ops) {
 			list_del(&tb->list);
+			mutex_unlock(&backend_mutex);
+			/*
+			 * Wait for any outstanding backend driver ->rcu_head
+			 * callbacks to complete post TBO->free_device() ->
+			 * call_rcu(), before allowing backend driver module
+			 * unload of target_backend_ops->owner to proceed.
+			 */
+			rcu_barrier();
 			kfree(tb);
-			break;
+			return;
 		}
 	}
 	mutex_unlock(&backend_mutex);
@@ -1203,17 +1203,13 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
 	struct se_dev_entry *deve;
 	struct se_session *sess = cmd->se_sess;
 	struct se_node_acl *nacl;
+	struct scsi_lun slun;
 	unsigned char *buf;
 	u32 lun_count = 0, offset = 8;
-
-	if (cmd->data_length < 16) {
-		pr_warn("REPORT LUNS allocation length %u too small\n",
-			cmd->data_length);
-		return TCM_INVALID_CDB_FIELD;
-	}
+	__be32 len;
 
 	buf = transport_kmap_data_sg(cmd);
-	if (!buf)
+	if (cmd->data_length && !buf)
 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
 	/*
@@ -1221,11 +1217,9 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
 	 * coming via a target_core_mod PASSTHROUGH op, and not through
 	 * a $FABRIC_MOD. In that case, report LUN=0 only.
 	 */
-	if (!sess) {
-		int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
-		lun_count = 1;
+	if (!sess)
 		goto done;
-	}
+
 	nacl = sess->se_node_acl;
 
 	rcu_read_lock();
@@ -1236,10 +1230,12 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
 		 * See SPC2-R20 7.19.
 		 */
 		lun_count++;
-		if ((offset + 8) > cmd->data_length)
+		if (offset >= cmd->data_length)
 			continue;
 
-		int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
+		int_to_scsilun(deve->mapped_lun, &slun);
+		memcpy(buf + offset, &slun,
+		       min(8u, cmd->data_length - offset));
 		offset += 8;
 	}
 	rcu_read_unlock();
@@ -1248,12 +1244,22 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
 	 * See SPC3 r07, page 159.
 	 */
 done:
-	lun_count *= 8;
-	buf[0] = ((lun_count >> 24) & 0xff);
-	buf[1] = ((lun_count >> 16) & 0xff);
-	buf[2] = ((lun_count >> 8) & 0xff);
-	buf[3] = (lun_count & 0xff);
-	transport_kunmap_data_sg(cmd);
+	/*
+	 * If no LUNs are accessible, report virtual LUN 0.
+	 */
+	if (lun_count == 0) {
+		int_to_scsilun(0, &slun);
+		if (cmd->data_length > 8)
+			memcpy(buf + offset, &slun,
+			       min(8u, cmd->data_length - offset));
+		lun_count = 1;
+	}
+
+	if (buf) {
+		len = cpu_to_be32(lun_count * 8);
+		memcpy(buf, &len, min_t(int, sizeof len, cmd->data_length));
+		transport_kunmap_data_sg(cmd);
+	}
 
 	target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
 	return 0;
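Editor's note: the REPORT LUNS hunks replace direct stores into the response buffer with a staged copy that is clipped to the allocation length, so a short buffer can no longer be overrun. A self-contained C sketch of that bounded-copy idea; `scsi_lun`, `int_to_scsilun` and the flat encoding are simplified models (valid only for small LUN numbers), and `buf` is assumed to hold at least `data_length` bytes:

#include <stdint.h>
#include <string.h>

struct scsi_lun { uint8_t b[8]; };

static void int_to_scsilun(uint16_t lun, struct scsi_lun *slun)
{
	/* Peripheral-device addressing, adequate for LUNs < 256. */
	memset(slun, 0, sizeof(*slun));
	slun->b[0] = (lun >> 8) & 0xff;
	slun->b[1] = lun & 0xff;
}

static uint32_t report_luns(const uint16_t *luns, uint32_t nluns,
			    uint8_t *buf, uint32_t data_length)
{
	uint32_t lun_count = 0, offset = 8, i, len;
	struct scsi_lun slun;

	for (i = 0; i < nluns; i++) {
		lun_count++;                      /* always counted ...   */
		if (offset >= data_length)
			continue;                 /* ... even when it does
						     not fit the buffer  */
		int_to_scsilun(luns[i], &slun);
		memcpy(buf + offset, &slun,
		       (data_length - offset < 8) ? data_length - offset : 8);
		offset += 8;
	}

	/* Big-endian LUN LIST LENGTH header, also clipped. */
	len = lun_count * 8;
	uint8_t hdr[4] = { len >> 24, len >> 16, len >> 8, len };
	memcpy(buf, hdr, (data_length < 4) ? data_length : 4);

	return 8 + lun_count * 8;   /* full size, for residual accounting */
}

Returning the full size while copying only what fits lets the caller compute the residual exactly as `target_complete_cmd_with_length()` does above.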
@@ -68,7 +68,7 @@ struct power_table {
  *	registered cooling device.
  * @cpufreq_state: integer value representing the current state of cpufreq
  *	cooling devices.
- * @cpufreq_val: integer value representing the absolute value of the clipped
+ * @clipped_freq: integer value representing the absolute value of the clipped
  *	frequency.
  * @max_level: maximum cooling level. One less than total number of valid
  *	cpufreq frequencies.
@@ -91,7 +91,7 @@ struct cpufreq_cooling_device {
 	int id;
 	struct thermal_cooling_device *cool_dev;
 	unsigned int cpufreq_state;
-	unsigned int cpufreq_val;
+	unsigned int clipped_freq;
 	unsigned int max_level;
 	unsigned int *freq_table;	/* In descending order */
 	struct cpumask allowed_cpus;
@@ -107,6 +107,9 @@ struct cpufreq_cooling_device {
 static DEFINE_IDR(cpufreq_idr);
 static DEFINE_MUTEX(cooling_cpufreq_lock);
 
+static unsigned int cpufreq_dev_count;
+
+static DEFINE_MUTEX(cooling_list_lock);
 static LIST_HEAD(cpufreq_dev_list);
 
 /**
@@ -185,14 +188,14 @@ unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
 {
 	struct cpufreq_cooling_device *cpufreq_dev;
 
-	mutex_lock(&cooling_cpufreq_lock);
+	mutex_lock(&cooling_list_lock);
 	list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
 		if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
-			mutex_unlock(&cooling_cpufreq_lock);
+			mutex_unlock(&cooling_list_lock);
 			return get_level(cpufreq_dev, freq);
 		}
 	}
-	mutex_unlock(&cooling_cpufreq_lock);
+	mutex_unlock(&cooling_list_lock);
 
 	pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
 	return THERMAL_CSTATE_INVALID;
@@ -215,29 +218,35 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
 				    unsigned long event, void *data)
 {
 	struct cpufreq_policy *policy = data;
-	unsigned long max_freq = 0;
+	unsigned long clipped_freq;
 	struct cpufreq_cooling_device *cpufreq_dev;
 
-	switch (event) {
-
-	case CPUFREQ_ADJUST:
-		mutex_lock(&cooling_cpufreq_lock);
-		list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
-			if (!cpumask_test_cpu(policy->cpu,
-					      &cpufreq_dev->allowed_cpus))
-				continue;
-
-			max_freq = cpufreq_dev->cpufreq_val;
-
-			if (policy->max != max_freq)
-				cpufreq_verify_within_limits(policy, 0,
-							     max_freq);
-		}
-		mutex_unlock(&cooling_cpufreq_lock);
-		break;
-	default:
+	if (event != CPUFREQ_ADJUST)
 		return NOTIFY_DONE;
+
+	mutex_lock(&cooling_list_lock);
+	list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
+		if (!cpumask_test_cpu(policy->cpu, &cpufreq_dev->allowed_cpus))
+			continue;
+
+		/*
+		 * policy->max is the maximum allowed frequency defined by user
+		 * and clipped_freq is the maximum that thermal constraints
+		 * allow.
+		 *
+		 * If clipped_freq is lower than policy->max, then we need to
+		 * readjust policy->max.
+		 *
+		 * But, if clipped_freq is greater than policy->max, we don't
+		 * need to do anything.
+		 */
+		clipped_freq = cpufreq_dev->clipped_freq;
+
+		if (policy->max > clipped_freq)
+			cpufreq_verify_within_limits(policy, 0, clipped_freq);
+		break;
 	}
+	mutex_unlock(&cooling_list_lock);
 
 	return NOTIFY_OK;
 }
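Editor's note: the rewritten notifier only readjusts when the thermal cap is the stricter constraint. A compilable model of just that clamp decision; the types and helper below are illustrative stand-ins, not the kernel's cpufreq API:

#include <stdio.h>

struct policy { unsigned int min, max; };   /* frequencies in kHz */

static void verify_within_limits(struct policy *p, unsigned int lo,
				 unsigned int hi)
{
	if (p->max > hi)
		p->max = hi;
	if (p->min > p->max)
		p->min = p->max;
}

static void thermal_adjust(struct policy *p, unsigned int clipped_freq)
{
	/* p->max is the user limit, clipped_freq the thermal one; only
	 * readjust when thermal is stricter than what the user asked. */
	if (p->max > clipped_freq)
		verify_within_limits(p, 0, clipped_freq);
}

int main(void)
{
	struct policy p = { 200000, 2400000 };

	thermal_adjust(&p, 1800000);  /* thermal cap below user max: clamp */
	printf("max after clamp: %u kHz\n", p.max);
	thermal_adjust(&p, 2000000);  /* cap above current max: unchanged  */
	printf("max unchanged:   %u kHz\n", p.max);
	return 0;
}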
@@ -519,7 +528,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
 
 	clip_freq = cpufreq_device->freq_table[state];
 	cpufreq_device->cpufreq_state = state;
-	cpufreq_device->cpufreq_val = clip_freq;
+	cpufreq_device->clipped_freq = clip_freq;
 
 	cpufreq_update_policy(cpu);
 
@@ -861,17 +870,19 @@ __cpufreq_cooling_register(struct device_node *np,
 		pr_debug("%s: freq:%u KHz\n", __func__, freq);
 	}
 
-	cpufreq_dev->cpufreq_val = cpufreq_dev->freq_table[0];
+	cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
 	cpufreq_dev->cool_dev = cool_dev;
 
 	mutex_lock(&cooling_cpufreq_lock);
-
+	mutex_lock(&cooling_list_lock);
+	list_add(&cpufreq_dev->node, &cpufreq_dev_list);
+	mutex_unlock(&cooling_list_lock);
+
 	/* Register the notifier for first cpufreq cooling device */
-	if (list_empty(&cpufreq_dev_list))
+	if (!cpufreq_dev_count++)
 		cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
 					  CPUFREQ_POLICY_NOTIFIER);
-	list_add(&cpufreq_dev->node, &cpufreq_dev_list);
 
 	mutex_unlock(&cooling_cpufreq_lock);
 
 	return cool_dev;
@@ -1013,13 +1024,17 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
 		return;
 
 	cpufreq_dev = cdev->devdata;
-	mutex_lock(&cooling_cpufreq_lock);
-	list_del(&cpufreq_dev->node);
 
 	/* Unregister the notifier for the last cpufreq cooling device */
-	if (list_empty(&cpufreq_dev_list))
+	mutex_lock(&cooling_cpufreq_lock);
+	if (!--cpufreq_dev_count)
 		cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
 					    CPUFREQ_POLICY_NOTIFIER);
+
+	mutex_lock(&cooling_list_lock);
+	list_del(&cpufreq_dev->node);
+	mutex_unlock(&cooling_list_lock);
 
 	mutex_unlock(&cooling_cpufreq_lock);
 
 	thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
@@ -334,7 +334,7 @@ static int allocate_power(struct thermal_zone_device *tz,
 				      max_allocatable_power, current_temp,
 				      (s32)control_temp - (s32)current_temp);
 
-	devm_kfree(&tz->device, req_power);
+	kfree(req_power);
 unlock:
 	mutex_unlock(&tz->lock);
 
@@ -426,7 +426,7 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
 		return -EINVAL;
 	}
 
-	params = devm_kzalloc(&tz->device, sizeof(*params), GFP_KERNEL);
+	params = kzalloc(sizeof(*params), GFP_KERNEL);
 	if (!params)
 		return -ENOMEM;
 
@@ -468,14 +468,14 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
 	return 0;
 
 free:
-	devm_kfree(&tz->device, params);
+	kfree(params);
 	return ret;
 }
 
 static void power_allocator_unbind(struct thermal_zone_device *tz)
 {
 	dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id);
-	devm_kfree(&tz->device, tz->governor_data);
+	kfree(tz->governor_data);
 	tz->governor_data = NULL;
 }
 
@@ -2246,7 +2246,15 @@ static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
 
 		err = -EINVAL;
 		if (old) {
-			struct fuse_dev *fud = fuse_get_dev(old);
+			struct fuse_dev *fud = NULL;
+
+			/*
+			 * Check against file->f_op because CUSE
+			 * uses the same ioctl handler.
+			 */
+			if (old->f_op == file->f_op &&
+			    old->f_cred->user_ns == file->f_cred->user_ns)
+				fud = fuse_get_dev(old);
 
 			if (fud) {
 				mutex_lock(&fuse_mutex);
@@ -743,8 +743,6 @@ struct drm_connector {
 	uint8_t num_h_tile, num_v_tile;
 	uint8_t tile_h_loc, tile_v_loc;
 	uint16_t tile_h_size, tile_v_size;
-
-	struct list_head destroy_list;
 };
 
 /**
@@ -385,8 +385,6 @@ enum {
 	SATA_SSP		= 0x06,	/* Software Settings Preservation */
 	SATA_DEVSLP		= 0x09,	/* Device Sleep */
 
-	SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */
-
 	/* feature values for SET_MAX */
 	ATA_SET_MAX_ADDR	= 0x00,
 	ATA_SET_MAX_PASSWD	= 0x01,
@@ -530,8 +528,6 @@ struct ata_bmdma_prd {
 #define ata_id_cdb_intr(id)	(((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
 #define ata_id_has_da(id)	((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
 #define ata_id_has_devslp(id)	((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
-#define ata_id_has_ncq_autosense(id) \
-	((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))
 
 static inline bool ata_id_has_hipm(const u16 *id)
 {
@@ -720,20 +716,6 @@ static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
 	return false;
 }
 
-static inline bool ata_id_has_sense_reporting(const u16 *id)
-{
-	if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
-		return false;
-	return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
-}
-
-static inline bool ata_id_sense_reporting_enabled(const u16 *id)
-{
-	if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
-		return false;
-	return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
-}
-
 /**
  *	ata_id_major_version	-	get ATA level of drive
  *	@id:	Identify data
@@ -28,7 +28,6 @@ extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
 				   u64 * info_out);
 
 extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
-extern void scsi_set_sense_information(u8 *buf, u64 info);
 
 extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
 
@@ -141,6 +141,8 @@ struct snd_soc_tplg_ops {
 	int io_ops_count;
 };
 
+#ifdef CONFIG_SND_SOC_TOPOLOGY
+
 /* gets a pointer to data from the firmware block header */
 static inline const void *snd_soc_tplg_get_data(struct snd_soc_tplg_hdr *hdr)
 {
@@ -165,4 +167,14 @@ int snd_soc_tplg_widget_bind_event(struct snd_soc_dapm_widget *w,
 	const struct snd_soc_tplg_widget_events *events, int num_events,
 	u16 event_type);
 
+#else
+
+static inline int snd_soc_tplg_component_remove(struct snd_soc_component *comp,
+						u32 index)
+{
+	return 0;
+}
+
+#endif
+
 #endif
@@ -18,6 +18,12 @@
 #include <linux/types.h>
 #include <sound/asound.h>
 
+#ifndef __KERNEL__
+#error This API is an early revision and not enabled in the current
+#error kernel release, it will be enabled in a future kernel version
+#error with incompatible changes to what is here.
+#endif
+
 /*
  * Maximum number of channels topology kcontrol can represent.
  */

49 ipc/sem.c
@@ -252,6 +252,16 @@ static void sem_rcu_free(struct rcu_head *head)
 	ipc_rcu_free(head);
 }
 
+/*
+ * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
+ * are only control barriers.
+ * The code must pair with spin_unlock(&sem->lock) or
+ * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
+ *
+ * smp_rmb() is sufficient, as writes cannot pass the control barrier.
+ */
+#define ipc_smp_acquire__after_spin_is_unlocked()	smp_rmb()
+
 /*
  * Wait until all currently ongoing simple ops have completed.
  * Caller must own sem_perm.lock.
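Editor's note: the macro added above names a subtle ordering rule -- observing a lock as free is only a control dependency, and a real acquire barrier is needed before re-reading the data that lock protects. A C11 sketch of that pairing with illustrative names (sem_like, global_locked, complex_count are assumptions, not the kernel's types):

#include <stdatomic.h>
#include <stdbool.h>

struct sem_like {
	atomic_bool global_locked;      /* stands in for sem_perm.lock */
	atomic_int complex_count;
};

static bool can_take_fast_path(struct sem_like *s)
{
	if (atomic_load_explicit(&s->global_locked, memory_order_relaxed))
		return false;

	/*
	 * The load above is only a control dependency.  Pair it with an
	 * acquire fence -- the ipc_smp_acquire__after_spin_is_unlocked()
	 * of the hunk -- so the complex_count re-read below cannot return
	 * a stale value from before the other thread unlocked.
	 */
	atomic_thread_fence(memory_order_acquire);

	return atomic_load_explicit(&s->complex_count,
				    memory_order_relaxed) == 0;
}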
@@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma)
 		sem = sma->sem_base + i;
 		spin_unlock_wait(&sem->lock);
 	}
+	ipc_smp_acquire__after_spin_is_unlocked();
 }
 
 /*
@@ -327,13 +338,12 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
 		/* Then check that the global lock is free */
 		if (!spin_is_locked(&sma->sem_perm.lock)) {
 			/*
-			 * The ipc object lock check must be visible on all
-			 * cores before rechecking the complex count.  Otherwise
-			 * we can race with another thread that does:
+			 * We need a memory barrier with acquire semantics,
+			 * otherwise we can race with another thread that does:
 			 *	complex_count++;
 			 *	spin_unlock(sem_perm.lock);
 			 */
-			smp_rmb();
+			ipc_smp_acquire__after_spin_is_unlocked();
 
 			/*
 			 * Now repeat the test of complex_count:
@@ -2074,17 +2084,28 @@ void exit_sem(struct task_struct *tsk)
 		rcu_read_lock();
 		un = list_entry_rcu(ulp->list_proc.next,
 				    struct sem_undo, list_proc);
-		if (&un->list_proc == &ulp->list_proc)
-			semid = -1;
-		 else
-			semid = un->semid;
-
-		if (semid == -1) {
+		if (&un->list_proc == &ulp->list_proc) {
+			/*
+			 * We must wait for freeary() before freeing this ulp,
+			 * in case we raced with last sem_undo. There is a small
+			 * possibility where we exit while freeary() didn't
+			 * finish unlocking sem_undo_list.
+			 */
+			spin_unlock_wait(&ulp->lock);
 			rcu_read_unlock();
 			break;
 		}
+		spin_lock(&ulp->lock);
+		semid = un->semid;
+		spin_unlock(&ulp->lock);
 
-		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
+		/* exit_sem raced with IPC_RMID, nothing to do */
+		if (semid == -1) {
+			rcu_read_unlock();
+			continue;
+		}
+
+		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
 		/* exit_sem raced with IPC_RMID, nothing to do */
 		if (IS_ERR(sma)) {
 			rcu_read_unlock();
@@ -2112,9 +2133,11 @@ void exit_sem(struct task_struct *tsk)
 		ipc_assert_locked_object(&sma->sem_perm);
 		list_del(&un->list_id);
 
-		spin_lock(&ulp->lock);
+		/* we are the last process using this ulp, acquiring ulp->lock
+		 * isn't required. Besides that, we are also protected against
+		 * IPC_RMID as we hold sma->sem_perm lock now
+		 */
 		list_del_rcu(&un->list_proc);
-		spin_unlock(&ulp->lock);
 
 		/* perform adjustments registered in un */
 		for (i = 0; i < sma->sem_nsems; i++) {
@@ -1223,7 +1223,7 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
 	spin_unlock_irq(&callback_lock);
 
 	/* use trialcs->mems_allowed as a temp variable */
-	update_nodemasks_hier(cs, &cs->mems_allowed);
+	update_nodemasks_hier(cs, &trialcs->mems_allowed);
 done:
 	return retval;
 }
@@ -1868,8 +1868,6 @@ event_sched_in(struct perf_event *event,
 
 	perf_pmu_disable(event->pmu);
 
-	event->tstamp_running += tstamp - event->tstamp_stopped;
-
 	perf_set_shadow_time(event, ctx, tstamp);
 
 	perf_log_itrace_start(event);
@@ -1881,6 +1879,8 @@ event_sched_in(struct perf_event *event,
 		goto out;
 	}
 
+	event->tstamp_running += tstamp - event->tstamp_stopped;
+
 	if (!is_software_event(event))
 		cpuctx->active_oncpu++;
 	if (!ctx->nr_active++)
@@ -4011,28 +4011,21 @@ static void perf_event_for_each(struct perf_event *event,
 		perf_event_for_each_child(sibling, func);
 }
 
-static int perf_event_period(struct perf_event *event, u64 __user *arg)
-{
-	struct perf_event_context *ctx = event->ctx;
-	int ret = 0, active;
+struct period_event {
+	struct perf_event *event;
 	u64 value;
+};
 
-	if (!is_sampling_event(event))
-		return -EINVAL;
-
-	if (copy_from_user(&value, arg, sizeof(value)))
-		return -EFAULT;
-
-	if (!value)
-		return -EINVAL;
+static int __perf_event_period(void *info)
+{
+	struct period_event *pe = info;
+	struct perf_event *event = pe->event;
+	struct perf_event_context *ctx = event->ctx;
+	u64 value = pe->value;
+	bool active;
 
-	raw_spin_lock_irq(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	if (event->attr.freq) {
-		if (value > sysctl_perf_event_sample_rate) {
-			ret = -EINVAL;
-			goto unlock;
-		}
-
 		event->attr.sample_freq = value;
 	} else {
 		event->attr.sample_period = value;
@@ -4051,11 +4044,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 		event->pmu->start(event, PERF_EF_RELOAD);
 		perf_pmu_enable(ctx->pmu);
 	}
+	raw_spin_unlock(&ctx->lock);
+
+	return 0;
+}
+
+static int perf_event_period(struct perf_event *event, u64 __user *arg)
+{
+	struct period_event pe = { .event = event, };
+	struct perf_event_context *ctx = event->ctx;
+	struct task_struct *task;
+	u64 value;
+
+	if (!is_sampling_event(event))
+		return -EINVAL;
+
+	if (copy_from_user(&value, arg, sizeof(value)))
+		return -EFAULT;
+
+	if (!value)
+		return -EINVAL;
+
+	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
+		return -EINVAL;
+
+	task = ctx->task;
+	pe.value = value;
+
+	if (!task) {
+		cpu_function_call(event->cpu, __perf_event_period, &pe);
+		return 0;
+	}
+
+retry:
+	if (!task_function_call(task, __perf_event_period, &pe))
+		return 0;
+
+	raw_spin_lock_irq(&ctx->lock);
+	if (ctx->is_active) {
+		raw_spin_unlock_irq(&ctx->lock);
+		task = ctx->task;
+		goto retry;
+	}
+
+	__perf_event_period(&pe);
 
-unlock:
 	raw_spin_unlock_irq(&ctx->lock);
-
-	return ret;
+	return 0;
 }
 
 static const struct file_operations perf_fops;
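Editor's note: the fix above moves the period update into a function that runs where the event cannot be concurrently scheduled, retrying if the context went active in between. The kernel does this with IPIs (cpu_function_call/task_function_call); the pthread sketch below is only a loose analogue of that "mutate only while quiescent, else back off and retry" shape, with every name invented for illustration:

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

struct ctx {
	pthread_mutex_t lock;
	bool is_active;                 /* owner is currently running it */
	unsigned long long sample_period;
};

static void set_period(struct ctx *c, unsigned long long value)
{
	for (;;) {
		pthread_mutex_lock(&c->lock);
		if (!c->is_active) {
			/* Quiescent: safe to mutate, nobody samples now. */
			c->sample_period = value;
			pthread_mutex_unlock(&c->lock);
			return;
		}
		pthread_mutex_unlock(&c->lock);
		sched_yield();          /* back off, let the owner finish */
	}
}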
@@ -4793,12 +4828,20 @@ static const struct file_operations perf_fops = {
  * to user-space before waking everybody up.
  */
 
+static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
+{
+	/* only the parent has fasync state */
+	if (event->parent)
+		event = event->parent;
+	return &event->fasync;
+}
+
 void perf_event_wakeup(struct perf_event *event)
 {
 	ring_buffer_wakeup(event);
 
 	if (event->pending_kill) {
-		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
+		kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
 		event->pending_kill = 0;
 	}
 }
@@ -6177,7 +6220,7 @@ static int __perf_event_overflow(struct perf_event *event,
 	else
 		perf_event_output(event, data, regs);
 
-	if (event->fasync && event->pending_kill) {
+	if (*perf_event_fasync(event) && event->pending_kill) {
 		event->pending_wakeup = 1;
 		irq_work_queue(&event->pending);
 	}
@@ -559,11 +559,13 @@ static void __rb_free_aux(struct ring_buffer *rb)
 		rb->aux_priv = NULL;
 	}
 
-	for (pg = 0; pg < rb->aux_nr_pages; pg++)
-		rb_free_aux_page(rb, pg);
+	if (rb->aux_nr_pages) {
+		for (pg = 0; pg < rb->aux_nr_pages; pg++)
+			rb_free_aux_page(rb, pg);
 
-	kfree(rb->aux_pages);
-	rb->aux_nr_pages = 0;
+		kfree(rb->aux_pages);
+		rb->aux_nr_pages = 0;
+	}
 }
 
 void rb_free_aux(struct ring_buffer *rb)
@@ -4,6 +4,7 @@
 
 #include <linux/hash.h>
 #include <linux/bootmem.h>
+#include <linux/debug_locks.h>
 
 /*
  * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
@@ -286,15 +287,23 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
 {
 	struct __qspinlock *l = (void *)lock;
 	struct pv_node *node;
+	u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
 
 	/*
 	 * We must not unlock if SLOW, because in that case we must first
 	 * unhash. Otherwise it would be possible to have multiple @lock
 	 * entries, which would be BAD.
 	 */
-	if (likely(cmpxchg(&l->locked, _Q_LOCKED_VAL, 0) == _Q_LOCKED_VAL))
+	if (likely(lockval == _Q_LOCKED_VAL))
 		return;
 
+	if (unlikely(lockval != _Q_SLOW_VAL)) {
+		if (debug_locks_silent)
+			return;
+		WARN(1, "pvqspinlock: lock %p has corrupted value 0x%x!\n", lock, atomic_read(&lock->val));
+		return;
+	}
+
 	/*
 	 * Since the above failed to release, this must be the SLOW path.
 	 * Therefore start by looking up the blocked node and unhashing it.
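Editor's note: the hunk above does the unlock cmpxchg once, keeps the observed value, and treats anything that is neither LOCKED nor SLOW as corruption. A C11 userspace sketch of that fastpath shape; the constants and function name are illustrative, not the kernel's:

#include <stdatomic.h>
#include <stdio.h>

#define Q_UNLOCKED   0
#define Q_LOCKED_VAL 1
#define Q_SLOW_VAL   3

static void pv_unlock(atomic_uchar *locked)
{
	unsigned char expect = Q_LOCKED_VAL;

	/* One cmpxchg: on success this is the entire unlock fastpath. */
	if (atomic_compare_exchange_strong(locked, &expect, Q_UNLOCKED))
		return;

	/* On failure, expect now holds the value actually observed. */
	if (expect != Q_SLOW_VAL) {
		fprintf(stderr, "corrupted lock value 0x%x\n", expect);
		return;
	}

	/* Slow path: unhash the blocked waiter, then release (elided). */
}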
2 mm/cma.h
@@ -16,7 +16,7 @@ struct cma {
 extern struct cma cma_areas[MAX_CMA_AREAS];
 extern unsigned cma_area_count;
 
-static unsigned long cma_bitmap_maxno(struct cma *cma)
+static inline unsigned long cma_bitmap_maxno(struct cma *cma)
 {
 	return cma->count >> cma->order_per_bit;
 }
@@ -2,7 +2,7 @@
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
- * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some of code borrowed from https://github.com/xairy/linux by
 *        Andrey Konovalov <adech.fo@gmail.com>
@@ -2,7 +2,7 @@
 * This file contains error reporting code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
- * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some of code borrowed from https://github.com/xairy/linux by
 *        Andrey Konovalov <adech.fo@gmail.com>
@@ -1146,8 +1146,11 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
 	}
 
 	if (!PageHuge(p) && PageTransHuge(hpage)) {
-		if (unlikely(split_huge_page(hpage))) {
-			pr_err("MCE: %#lx: thp split failed\n", pfn);
+		if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
+			if (!PageAnon(hpage))
+				pr_err("MCE: %#lx: non anonymous thp\n", pfn);
+			else
+				pr_err("MCE: %#lx: thp split failed\n", pfn);
 			if (TestClearPageHWPoison(p))
 				atomic_long_sub(nr_pages, &num_poisoned_pages);
 			put_page(p);
@@ -1538,6 +1541,8 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
 		 */
 		ret = __get_any_page(page, pfn, 0);
 		if (!PageLRU(page)) {
+			/* Drop page reference which is from __get_any_page() */
+			put_page(page);
 			pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
 				pfn, page->flags);
 			return -EIO;
@@ -1567,13 +1572,12 @@ static int soft_offline_huge_page(struct page *page, int flags)
 	unlock_page(hpage);
 
 	ret = isolate_huge_page(hpage, &pagelist);
-	if (ret) {
-		/*
-		 * get_any_page() and isolate_huge_page() takes a refcount each,
-		 * so need to drop one here.
-		 */
-		put_page(hpage);
-	} else {
+	/*
+	 * get_any_page() and isolate_huge_page() takes a refcount each,
+	 * so need to drop one here.
+	 */
+	put_page(hpage);
+	if (!ret) {
 		pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
 		return -EBUSY;
 	}
@@ -1277,6 +1277,7 @@ int __ref add_memory(int nid, u64 start, u64 size)
 
 	/* create new memmap entry */
 	firmware_map_add_hotplug(start, start + size, "System RAM");
+	memblock_add_node(start, size, nid);
 
 	goto out;
 
@@ -2013,6 +2014,8 @@ void __ref remove_memory(int nid, u64 start, u64 size)
 
 	/* remove memmap entry */
 	firmware_map_remove(start, start + size, "System RAM");
+	memblock_free(start, size);
+	memblock_remove(start, size);
 
 	arch_remove_memory(start, size);
 
@@ -5060,6 +5060,10 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
 {
 	unsigned long zone_start_pfn, zone_end_pfn;
 
+	/* When hotadd a new node, the node should be empty */
+	if (!node_start_pfn && !node_end_pfn)
+		return 0;
+
 	/* Get the start and end of the zone */
 	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
 	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
@@ -5123,6 +5127,10 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
 	unsigned long zone_start_pfn, zone_end_pfn;
 
+	/* When hotadd a new node, the node should be empty */
+	if (!node_start_pfn && !node_end_pfn)
+		return 0;
+
 	zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
 	zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
 
@@ -596,8 +596,11 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
 	/* increase the refcounter of the related vlan */
 	vlan = batadv_softif_vlan_get(bat_priv, vid);
 	if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d",
-		 addr, BATADV_PRINT_VID(vid)))
+		 addr, BATADV_PRINT_VID(vid))) {
+		kfree(tt_local);
+		tt_local = NULL;
 		goto out;
+	}
 
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
@@ -1608,7 +1608,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
 		break;
 	}
 
-	if (skb_trimmed)
+	if (skb_trimmed && skb_trimmed != skb)
 		kfree_skb(skb_trimmed);
 
 	return err;
@@ -1653,7 +1653,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
 		break;
 	}
 
-	if (skb_trimmed)
+	if (skb_trimmed && skb_trimmed != skb)
 		kfree_skb(skb_trimmed);
 
 	return err;
@@ -4022,8 +4022,8 @@ EXPORT_SYMBOL(skb_checksum_setup);
 * Otherwise returns the provided skb. Returns NULL in error cases
 * (e.g. transport_len exceeds skb length or out-of-memory).
 *
- * Caller needs to set the skb transport header and release the returned skb.
- * Provided skb is consumed.
+ * Caller needs to set the skb transport header and free any returned skb if it
+ * differs from the provided skb.
 */
 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
 					       unsigned int transport_len)
@@ -4032,16 +4032,12 @@ static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
 	unsigned int len = skb_transport_offset(skb) + transport_len;
 	int ret;
 
-	if (skb->len < len) {
-		kfree_skb(skb);
+	if (skb->len < len)
 		return NULL;
-	} else if (skb->len == len) {
+	else if (skb->len == len)
 		return skb;
-	}
 
 	skb_chk = skb_clone(skb, GFP_ATOMIC);
-	kfree_skb(skb);
-
 	if (!skb_chk)
 		return NULL;
 
@@ -4066,8 +4062,8 @@ static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
 * If the skb has data beyond the given transport length, then a
 * trimmed & cloned skb is checked and returned.
 *
- * Caller needs to set the skb transport header and release the returned skb.
- * Provided skb is consumed.
+ * Caller needs to set the skb transport header and free any returned skb if it
+ * differs from the provided skb.
 */
 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
 				     unsigned int transport_len,
@@ -4079,23 +4075,26 @@ struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
 
 	skb_chk = skb_checksum_maybe_trim(skb, transport_len);
 	if (!skb_chk)
-		return NULL;
+		goto err;
 
-	if (!pskb_may_pull(skb_chk, offset)) {
-		kfree_skb(skb_chk);
-		return NULL;
-	}
+	if (!pskb_may_pull(skb_chk, offset))
+		goto err;
 
 	__skb_pull(skb_chk, offset);
 	ret = skb_chkf(skb_chk);
 	__skb_push(skb_chk, offset);
 
-	if (ret) {
-		kfree_skb(skb_chk);
-		return NULL;
-	}
+	if (ret)
+		goto err;
 
 	return skb_chk;
+
+err:
+	if (skb_chk && skb_chk != skb)
+		kfree_skb(skb_chk);
+
+	return NULL;
+
 }
 EXPORT_SYMBOL(skb_checksum_trimmed);
 
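Editor's note: the networking hunks above change the ownership rule from "the provided skb is consumed" to "free the returned buffer only if it differs from the one you passed in", funnelling every failure through one err label. A small self-contained C model of that convention; struct buf and validate_maybe_clone are invented for illustration:

#include <stdlib.h>

struct buf { unsigned char *data; size_t len; };

static struct buf *validate_maybe_clone(struct buf *b, size_t want)
{
	struct buf *chk = b;

	if (b->len < want)
		goto err;               /* never frees the caller's buf */
	if (b->len > want) {
		chk = malloc(sizeof(*chk));   /* work on a trimmed clone */
		if (!chk)
			goto err;
		chk->data = b->data;
		chk->len = want;
	}
	/* ... checksum validation on chk would go here ... */
	return chk;

err:
	if (chk && chk != b)            /* single cleanup point */
		free(chk);
	return NULL;
}

Because the input is never consumed, a caller that receives NULL still owns its original buffer, which is exactly what the bridge code above relies on when it compares skb_trimmed against skb before freeing.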
@@ -2468,7 +2468,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
 			key = l->key + 1;
 		iter->pos++;
 
-		if (pos-- <= 0)
+		if (--pos <= 0)
 			break;
 
 		l = NULL;
@@ -1435,33 +1435,35 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
 	struct sk_buff *skb_chk;
 	unsigned int transport_len;
 	unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr);
-	int ret;
+	int ret = -EINVAL;
 
 	transport_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
 
-	skb_get(skb);
 	skb_chk = skb_checksum_trimmed(skb, transport_len,
 				       ip_mc_validate_checksum);
 	if (!skb_chk)
-		return -EINVAL;
+		goto err;
 
-	if (!pskb_may_pull(skb_chk, len)) {
-		kfree_skb(skb_chk);
-		return -EINVAL;
-	}
+	if (!pskb_may_pull(skb_chk, len))
+		goto err;
 
 	ret = ip_mc_check_igmp_msg(skb_chk);
-	if (ret) {
-		kfree_skb(skb_chk);
-		return ret;
-	}
+	if (ret)
+		goto err;
 
 	if (skb_trimmed)
 		*skb_trimmed = skb_chk;
-	else
+	/* free now unneeded clone */
+	else if (skb_chk != skb)
 		kfree_skb(skb_chk);
 
-	return 0;
+	ret = 0;
+
+err:
+	if (ret && skb_chk && skb_chk != skb)
+		kfree_skb(skb_chk);
+
+	return ret;
 }
 
 /**
@@ -1470,7 +1472,7 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
 * @skb_trimmed: to store an skb pointer trimmed to IPv4 packet tail (optional)
 *
 * Checks whether an IPv4 packet is a valid IGMP packet. If so sets
- * skb network and transport headers accordingly and returns zero.
+ * skb transport header accordingly and returns zero.
 *
 * -EINVAL: A broken packet was detected, i.e. it violates some internet
 *	standard
@@ -1485,7 +1487,8 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
 * to leave the original skb and its full frame unchanged (which might be
 * desirable for layer 2 frame jugglers).
 *
- * The caller needs to release a reference count from any returned skb_trimmed.
+ * Caller needs to set the skb network header and free any returned skb if it
+ * differs from the provided skb.
 */
 int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
 {
@@ -593,7 +593,7 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue,
 	}
 
 	spin_unlock(&queue->syn_wait_lock);
-	if (del_timer_sync(&req->rsk_timer))
+	if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
 		reqsk_put(req);
 	return found;
 }
@@ -41,8 +41,6 @@ static int tcp_syn_retries_min = 1;
 static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
 static int ip_ping_group_range_min[] = { 0, 0 };
 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
-static int min_sndbuf = SOCK_MIN_SNDBUF;
-static int min_rcvbuf = SOCK_MIN_RCVBUF;
 
 /* Update system visible IP port range */
 static void set_local_port_range(struct net *net, int range[2])
@@ -530,7 +528,7 @@ static struct ctl_table ipv4_table[] = {
 		.maxlen		= sizeof(sysctl_tcp_wmem),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &min_sndbuf,
+		.extra1		= &one,
 	},
 	{
 		.procname	= "tcp_notsent_lowat",
@@ -545,7 +543,7 @@ static struct ctl_table ipv4_table[] = {
 		.maxlen		= sizeof(sysctl_tcp_rmem),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &min_rcvbuf,
+		.extra1		= &one,
 	},
 	{
 		.procname	= "tcp_app_win",
@@ -758,7 +756,7 @@ static struct ctl_table ipv4_table[] = {
 		.maxlen		= sizeof(sysctl_udp_rmem_min),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &min_rcvbuf,
+		.extra1		= &one
 	},
 	{
 		.procname	= "udp_wmem_min",
@@ -766,7 +764,7 @@ static struct ctl_table ipv4_table[] = {
 		.maxlen		= sizeof(sysctl_udp_wmem_min),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &min_sndbuf,
+		.extra1		= &one
 	},
 	{ }
 };
Some files were not shown because too many files have changed in this diff.