Merge branch 'linus' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit c52b5c5f96
@@ -300,12 +300,6 @@ unattached instance are:
The ioctl calls available on an instance of /dev/ppp attached to a
channel are:

* PPPIOCDETACH detaches the instance from the channel. This ioctl is
deprecated since the same effect can be achieved by closing the
instance. In order to prevent possible races this ioctl will fail
with an EINVAL error if more than one file descriptor refers to this
instance (i.e. as a result of dup(), dup2() or fork()).

* PPPIOCCONNECT connects this channel to a PPP interface. The
argument should point to an int containing the interface unit
number. It will return an EINVAL error if the channel is already
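As context for the hunk above (which drops the deprecated PPPIOCDETACH paragraph), here is a minimal user-space sketch of the documented flow: attach an instance to a channel, connect it to an interface unit, then detach by simply closing the descriptor. The channel index and unit number are made-up placeholders, not values from the kernel tree.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ppp-ioctl.h>

int main(void)
{
	int fd = open("/dev/ppp", O_RDWR);
	if (fd < 0) { perror("open /dev/ppp"); return 1; }

	int chan_idx = 1; /* hypothetical channel index from the channel driver */
	if (ioctl(fd, PPPIOCATTCHAN, &chan_idx) < 0)
		perror("PPPIOCATTCHAN");

	int unit = 0;     /* connect the channel to interface unit 0 (ppp0) */
	if (ioctl(fd, PPPIOCCONNECT, &unit) < 0)
		perror("PPPIOCCONNECT"); /* EINVAL if already connected */

	/* PPPIOCDETACH is deprecated: closing the last fd detaches instead. */
	close(fd);
	return 0;
}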
MAINTAINERS | 16
@@ -2332,7 +2332,7 @@ F: drivers/gpio/gpio-ath79.c
F: Documentation/devicetree/bindings/gpio/gpio-ath79.txt

ATHEROS ATH GENERIC UTILITIES
M: "Luis R. Rodriguez" <mcgrof@do-not-panic.com>
M: Kalle Valo <kvalo@codeaurora.org>
L: linux-wireless@vger.kernel.org
S: Supported
F: drivers/net/wireless/ath/*
@@ -2347,7 +2347,7 @@ S: Maintained
F: drivers/net/wireless/ath/ath5k/

ATHEROS ATH6KL WIRELESS DRIVER
M: Kalle Valo <kvalo@qca.qualcomm.com>
M: Kalle Valo <kvalo@codeaurora.org>
L: linux-wireless@vger.kernel.org
W: http://wireless.kernel.org/en/users/Drivers/ath6kl
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
@@ -6503,9 +6503,15 @@ F: Documentation/networking/hinic.txt
F: drivers/net/ethernet/huawei/hinic/

HUGETLB FILESYSTEM
M: Nadia Yvette Chambers <nyc@holomorphy.com>
M: Mike Kravetz <mike.kravetz@oracle.com>
L: linux-mm@kvack.org
S: Maintained
F: fs/hugetlbfs/
F: mm/hugetlb.c
F: include/linux/hugetlb.h
F: Documentation/admin-guide/mm/hugetlbpage.rst
F: Documentation/vm/hugetlbfs_reserv.rst
F: Documentation/ABI/testing/sysfs-kernel-mm-hugepages

HVA ST MEDIA DRIVER
M: Jean-Christophe Trotin <jean-christophe.trotin@st.com>
@@ -11626,7 +11632,7 @@ S: Maintained
F: drivers/media/tuners/qt1010*

QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
M: Kalle Valo <kvalo@qca.qualcomm.com>
M: Kalle Valo <kvalo@codeaurora.org>
L: ath10k@lists.infradead.org
W: http://wireless.kernel.org/en/users/Drivers/ath10k
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
@@ -11677,7 +11683,7 @@ S: Maintained
F: drivers/media/platform/qcom/venus/

QUALCOMM WCN36XX WIRELESS DRIVER
M: Eugene Krasnikov <k.eugene.e@gmail.com>
M: Kalle Valo <kvalo@codeaurora.org>
L: wcn36xx@lists.infradead.org
W: http://wireless.kernel.org/en/users/Drivers/wcn36xx
T: git git://github.com/KrasnikovEugene/wcn36xx.git

Makefile | 11
@@ -2,7 +2,7 @@
VERSION = 4
PATCHLEVEL = 17
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Merciless Moray

# *DOCUMENTATION*
@@ -500,6 +500,9 @@ RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
export RETPOLINE_CFLAGS

KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
KBUILD_AFLAGS += $(call cc-option,-fno-PIE)

# check for 'asm goto'
ifeq ($(call shell-cached,$(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
CC_HAVE_ASM_GOTO := 1
@@ -621,9 +624,9 @@ endif # $(dot-config)
# Defaults to vmlinux, but the arch makefile usually adds further targets
all: vmlinux

KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
KBUILD_AFLAGS += $(call cc-option,-fno-PIE)
CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,)
CFLAGS_GCOV := -fprofile-arcs -ftest-coverage \
$(call cc-option,-fno-tree-loop-im) \
$(call cc-disable-warning,maybe-uninitialized,)
export CFLAGS_GCOV CFLAGS_KCOV

# The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
@@ -76,7 +76,7 @@
allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi";
clocks = <&ccu CLK_AHB_LCD0>, <&ccu CLK_AHB_HDMI0>,
<&ccu CLK_AHB_DE_BE0>, <&ccu CLK_AHB_DE_FE0>,
<&ccu CLK_DE_BE0>, <&ccu CLK_AHB_DE_FE0>,
<&ccu CLK_DE_BE0>, <&ccu CLK_DE_FE0>,
<&ccu CLK_TCON0_CH1>, <&ccu CLK_HDMI>,
<&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>;
status = "disabled";
@@ -88,7 +88,7 @@
allwinner,pipeline = "de_fe0-de_be0-lcd0";
clocks = <&ccu CLK_AHB_LCD0>, <&ccu CLK_AHB_DE_BE0>,
<&ccu CLK_AHB_DE_FE0>, <&ccu CLK_DE_BE0>,
<&ccu CLK_AHB_DE_FE0>, <&ccu CLK_TCON0_CH0>,
<&ccu CLK_DE_FE0>, <&ccu CLK_TCON0_CH0>,
<&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>;
status = "disabled";
};
@@ -99,7 +99,7 @@
allwinner,pipeline = "de_fe0-de_be0-lcd0-tve0";
clocks = <&ccu CLK_AHB_TVE0>, <&ccu CLK_AHB_LCD0>,
<&ccu CLK_AHB_DE_BE0>, <&ccu CLK_AHB_DE_FE0>,
<&ccu CLK_DE_BE0>, <&ccu CLK_AHB_DE_FE0>,
<&ccu CLK_DE_BE0>, <&ccu CLK_DE_FE0>,
<&ccu CLK_TCON0_CH1>, <&ccu CLK_DRAM_TVE0>,
<&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>;
status = "disabled";
@@ -117,6 +117,7 @@
phy-handle = <&int_mii_phy>;
phy-mode = "mii";
allwinner,leds-active-low;
status = "okay";
};

&hdmi {
@@ -51,7 +51,7 @@

leds {
/* The LEDs use PG0~2 pins, which conflict with MMC1 */
status = "disbaled";
status = "disabled";
};
};
@@ -323,7 +323,7 @@ void __init ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr)

/* All EP93xx devices use the same two GPIO pins for I2C bit-banging */
static struct gpiod_lookup_table ep93xx_i2c_gpiod_table = {
.dev_id = "i2c-gpio",
.dev_id = "i2c-gpio.0",
.table = {
/* Use local offsets on gpiochip/port "G" */
GPIO_LOOKUP_IDX("G", 1, NULL, 0,
@@ -51,7 +51,7 @@ static struct platform_device avila_flash = {
};

static struct gpiod_lookup_table avila_i2c_gpiod_table = {
.dev_id = "i2c-gpio",
.dev_id = "i2c-gpio.0",
.table = {
GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", AVILA_SDA_PIN,
NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
@@ -70,7 +70,7 @@ static struct platform_device dsmg600_flash = {
};

static struct gpiod_lookup_table dsmg600_i2c_gpiod_table = {
.dev_id = "i2c-gpio",
.dev_id = "i2c-gpio.0",
.table = {
GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", DSMG600_SDA_PIN,
NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
@@ -56,7 +56,7 @@ static struct platform_device fsg_flash = {
};

static struct gpiod_lookup_table fsg_i2c_gpiod_table = {
.dev_id = "i2c-gpio",
.dev_id = "i2c-gpio.0",
.table = {
GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", FSG_SDA_PIN,
NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
@@ -124,7 +124,7 @@ static struct platform_device ixdp425_flash_nand = {
#endif /* CONFIG_MTD_NAND_PLATFORM */

static struct gpiod_lookup_table ixdp425_i2c_gpiod_table = {
.dev_id = "i2c-gpio",
.dev_id = "i2c-gpio.0",
.table = {
GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", IXDP425_SDA_PIN,
NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
@@ -102,7 +102,7 @@ static struct platform_device nas100d_leds = {
};

static struct gpiod_lookup_table nas100d_i2c_gpiod_table = {
.dev_id = "i2c-gpio",
.dev_id = "i2c-gpio.0",
.table = {
GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", NAS100D_SDA_PIN,
NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
@@ -70,7 +70,7 @@ static struct platform_device nslu2_flash = {
};

static struct gpiod_lookup_table nslu2_i2c_gpiod_table = {
.dev_id = "i2c-gpio",
.dev_id = "i2c-gpio.0",
.table = {
GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", NSLU2_SDA_PIN,
NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
@@ -322,7 +322,7 @@ static struct soc_camera_link palmz72_iclink = {
};

static struct gpiod_lookup_table palmz72_i2c_gpiod_table = {
.dev_id = "i2c-gpio",
.dev_id = "i2c-gpio.0",
.table = {
GPIO_LOOKUP_IDX("gpio-pxa", 118, NULL, 0,
GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
@@ -460,7 +460,7 @@ static struct platform_device smc91x_device = {

/* i2c */
static struct gpiod_lookup_table viper_i2c_gpiod_table = {
.dev_id = "i2c-gpio",
.dev_id = "i2c-gpio.1",
.table = {
GPIO_LOOKUP_IDX("gpio-pxa", VIPER_RTC_I2C_SDA_GPIO,
NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
@@ -789,7 +789,7 @@ static int __init viper_tpm_setup(char *str)
__setup("tpm=", viper_tpm_setup);

struct gpiod_lookup_table viper_tpm_i2c_gpiod_table = {
.dev_id = "i2c-gpio",
.dev_id = "i2c-gpio.2",
.table = {
GPIO_LOOKUP_IDX("gpio-pxa", VIPER_TPM_I2C_SDA_GPIO,
NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
@@ -327,7 +327,7 @@ static struct platform_device simpad_gpio_leds = {
* i2c
*/
static struct gpiod_lookup_table simpad_i2c_gpiod_table = {
.dev_id = "i2c-gpio",
.dev_id = "i2c-gpio.0",
.table = {
GPIO_LOOKUP_IDX("gpio", 21, NULL, 0,
GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
@@ -299,7 +299,6 @@
/* GPIO blocks 16 thru 19 do not appear to be routed to pins */

dwmmc_0: dwmmc0@f723d000 {
max-frequency = <150000000>;
cap-mmc-highspeed;
mmc-hs200-1_8v;
non-removable;
@@ -117,7 +117,7 @@ static inline void atomic_and(int i, atomic_t *v)
/* LSE atomics */
" mvn %w[i], %w[i]\n"
" stclr %w[i], %[v]")
: [i] "+r" (w0), [v] "+Q" (v->counter)
: [i] "+&r" (w0), [v] "+Q" (v->counter)
: "r" (x1)
: __LL_SC_CLOBBERS);
}
@@ -135,7 +135,7 @@ static inline int atomic_fetch_and##name(int i, atomic_t *v) \
/* LSE atomics */ \
" mvn %w[i], %w[i]\n" \
" ldclr" #mb " %w[i], %w[i], %[v]") \
: [i] "+r" (w0), [v] "+Q" (v->counter) \
: [i] "+&r" (w0), [v] "+Q" (v->counter) \
: "r" (x1) \
: __LL_SC_CLOBBERS, ##cl); \
\
@@ -161,7 +161,7 @@ static inline void atomic_sub(int i, atomic_t *v)
/* LSE atomics */
" neg %w[i], %w[i]\n"
" stadd %w[i], %[v]")
: [i] "+r" (w0), [v] "+Q" (v->counter)
: [i] "+&r" (w0), [v] "+Q" (v->counter)
: "r" (x1)
: __LL_SC_CLOBBERS);
}
@@ -180,7 +180,7 @@ static inline int atomic_sub_return##name(int i, atomic_t *v) \
" neg %w[i], %w[i]\n" \
" ldadd" #mb " %w[i], w30, %[v]\n" \
" add %w[i], %w[i], w30") \
: [i] "+r" (w0), [v] "+Q" (v->counter) \
: [i] "+&r" (w0), [v] "+Q" (v->counter) \
: "r" (x1) \
: __LL_SC_CLOBBERS , ##cl); \
\
@@ -207,7 +207,7 @@ static inline int atomic_fetch_sub##name(int i, atomic_t *v) \
/* LSE atomics */ \
" neg %w[i], %w[i]\n" \
" ldadd" #mb " %w[i], %w[i], %[v]") \
: [i] "+r" (w0), [v] "+Q" (v->counter) \
: [i] "+&r" (w0), [v] "+Q" (v->counter) \
: "r" (x1) \
: __LL_SC_CLOBBERS, ##cl); \
\
@@ -314,7 +314,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
/* LSE atomics */
" mvn %[i], %[i]\n"
" stclr %[i], %[v]")
: [i] "+r" (x0), [v] "+Q" (v->counter)
: [i] "+&r" (x0), [v] "+Q" (v->counter)
: "r" (x1)
: __LL_SC_CLOBBERS);
}
@@ -332,7 +332,7 @@ static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
/* LSE atomics */ \
" mvn %[i], %[i]\n" \
" ldclr" #mb " %[i], %[i], %[v]") \
: [i] "+r" (x0), [v] "+Q" (v->counter) \
: [i] "+&r" (x0), [v] "+Q" (v->counter) \
: "r" (x1) \
: __LL_SC_CLOBBERS, ##cl); \
\
@@ -358,7 +358,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
/* LSE atomics */
" neg %[i], %[i]\n"
" stadd %[i], %[v]")
: [i] "+r" (x0), [v] "+Q" (v->counter)
: [i] "+&r" (x0), [v] "+Q" (v->counter)
: "r" (x1)
: __LL_SC_CLOBBERS);
}
@@ -377,7 +377,7 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
" neg %[i], %[i]\n" \
" ldadd" #mb " %[i], x30, %[v]\n" \
" add %[i], %[i], x30") \
: [i] "+r" (x0), [v] "+Q" (v->counter) \
: [i] "+&r" (x0), [v] "+Q" (v->counter) \
: "r" (x1) \
: __LL_SC_CLOBBERS, ##cl); \
\
@@ -404,7 +404,7 @@ static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
/* LSE atomics */ \
" neg %[i], %[i]\n" \
" ldadd" #mb " %[i], %[i], %[v]") \
: [i] "+r" (x0), [v] "+Q" (v->counter) \
: [i] "+&r" (x0), [v] "+Q" (v->counter) \
: "r" (x1) \
: __LL_SC_CLOBBERS, ##cl); \
\
@@ -435,7 +435,7 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
" sub x30, x30, %[ret]\n"
" cbnz x30, 1b\n"
"2:")
: [ret] "+r" (x0), [v] "+Q" (v->counter)
: [ret] "+&r" (x0), [v] "+Q" (v->counter)
:
: __LL_SC_CLOBBERS, "cc", "memory");

@@ -516,7 +516,7 @@ static inline long __cmpxchg_double##name(unsigned long old1, \
" eor %[old1], %[old1], %[oldval1]\n" \
" eor %[old2], %[old2], %[oldval2]\n" \
" orr %[old1], %[old1], %[old2]") \
: [old1] "+r" (x0), [old2] "+r" (x1), \
: [old1] "+&r" (x0), [old2] "+&r" (x1), \
[v] "+Q" (*(unsigned long *)ptr) \
: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
[oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
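The recurring change in the hunks above adds an early-clobber '&' to the inline-asm output constraints. As a toy sketch (not kernel code) of why that matters: without '&', GCC is allowed to allocate an input operand to the same register as an output, which breaks any asm that writes the output before it has read all inputs. The function name here is invented for illustration.

/* Builds on AArch64 only. */
static inline int double_minus(int a, int b)
{
	int out;
	asm("neg %w0, %w1\n\t"      /* %0 is written before %2 is read   */
	    "add %w0, %w0, %w2"
	    : "=&r"(out)            /* '&': %0 must not share a register */
	    : "r"(a), "r"(b));      /*      with the inputs %1 or %2     */
	return out;                 /* out = -a + b */
}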
@@ -75,3 +75,11 @@ NOKPROBE_SYMBOL(_mcount);
/* arm-smccc */
EXPORT_SYMBOL(__arm_smccc_smc);
EXPORT_SYMBOL(__arm_smccc_hvc);

/* tishift.S */
extern long long __ashlti3(long long a, int b);
EXPORT_SYMBOL(__ashlti3);
extern long long __ashrti3(long long a, int b);
EXPORT_SYMBOL(__ashrti3);
extern long long __lshrti3(long long a, int b);
EXPORT_SYMBOL(__lshrti3);
@@ -1,17 +1,6 @@
/*
* Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Copyright (C) 2017-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*/

#include <linux/linkage.h>
@@ -293,6 +293,57 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
static void __do_user_fault(struct siginfo *info, unsigned int esr)
{
current->thread.fault_address = (unsigned long)info->si_addr;

/*
* If the faulting address is in the kernel, we must sanitize the ESR.
* From userspace's point of view, kernel-only mappings don't exist
* at all, so we report them as level 0 translation faults.
* (This is not quite the way that "no mapping there at all" behaves:
* an alignment fault not caused by the memory type would take
* precedence over translation fault for a real access to empty
* space. Unfortunately we can't easily distinguish "alignment fault
* not caused by memory type" from "alignment fault caused by memory
* type", so we ignore this wrinkle and just return the translation
* fault.)
*/
if (current->thread.fault_address >= TASK_SIZE) {
switch (ESR_ELx_EC(esr)) {
case ESR_ELx_EC_DABT_LOW:
/*
* These bits provide only information about the
* faulting instruction, which userspace knows already.
* We explicitly clear bits which are architecturally
* RES0 in case they are given meanings in future.
* We always report the ESR as if the fault was taken
* to EL1 and so ISV and the bits in ISS[23:14] are
* clear. (In fact it always will be a fault to EL1.)
*/
esr &= ESR_ELx_EC_MASK | ESR_ELx_IL |
ESR_ELx_CM | ESR_ELx_WNR;
esr |= ESR_ELx_FSC_FAULT;
break;
case ESR_ELx_EC_IABT_LOW:
/*
* Claim a level 0 translation fault.
* All other bits are architecturally RES0 for faults
* reported with that DFSC value, so we clear them.
*/
esr &= ESR_ELx_EC_MASK | ESR_ELx_IL;
esr |= ESR_ELx_FSC_FAULT;
break;
default:
/*
* This should never happen (entry.S only brings us
* into this code for insn and data aborts from a lower
* exception level). Fail safe by not providing an ESR
* context record at all.
*/
WARN(1, "ESR 0x%x is not DABT or IABT from EL0\n", esr);
esr = 0;
break;
}
}

current->thread.fault_code = esr;
arm64_force_sig_info(info, esr_to_fault_info(esr)->name, current);
}
@@ -933,13 +933,15 @@ int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
pgprot_val(mk_sect_prot(prot)));
pud_t new_pud = pfn_pud(__phys_to_pfn(phys), sect_prot);

/* ioremap_page_range doesn't honour BBM */
if (pud_present(READ_ONCE(*pudp)))
/* Only allow permission changes for now */
if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)),
pud_val(new_pud)))
return 0;

BUG_ON(phys & ~PUD_MASK);
set_pud(pudp, pfn_pud(__phys_to_pfn(phys), sect_prot));
set_pud(pudp, new_pud);
return 1;
}

@@ -947,13 +949,15 @@ int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
{
pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
pgprot_val(mk_sect_prot(prot)));
pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), sect_prot);

/* ioremap_page_range doesn't honour BBM */
if (pmd_present(READ_ONCE(*pmdp)))
/* Only allow permission changes for now */
if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
pmd_val(new_pmd)))
return 0;

BUG_ON(phys & ~PMD_MASK);
set_pmd(pmdp, pfn_pmd(__phys_to_pfn(phys), sect_prot));
set_pmd(pmdp, new_pmd);
return 1;
}
@@ -9,6 +9,12 @@ config NDS32
select CLKSRC_MMIO
select CLONE_BACKWARDS
select COMMON_CLK
select GENERIC_ASHLDI3
select GENERIC_ASHRDI3
select GENERIC_LSHRDI3
select GENERIC_CMPDI2
select GENERIC_MULDI3
select GENERIC_UCMPDI2
select GENERIC_ATOMIC64
select GENERIC_CPU_DEVICES
select GENERIC_CLOCKEVENTS
@@ -82,6 +88,7 @@ endmenu

menu "Kernel Features"
source "kernel/Kconfig.preempt"
source "kernel/Kconfig.freezer"
source "mm/Kconfig"
source "kernel/Kconfig.hz"
endmenu
@@ -1,10 +1,11 @@
comment "Processor Features"

config CPU_BIG_ENDIAN
bool "Big endian"
def_bool !CPU_LITTLE_ENDIAN

config CPU_LITTLE_ENDIAN
def_bool !CPU_BIG_ENDIAN
bool "Little endian"
default y

config HWZOL
bool "hardware zero overhead loop support"
@@ -23,9 +23,6 @@ export TEXTADDR
# If we have a machine-specific directory, then include it in the build.
core-y += arch/nds32/kernel/ arch/nds32/mm/
libs-y += arch/nds32/lib/
LIBGCC_PATH := \
$(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name)
libs-y += $(LIBGCC_PATH)

ifneq '$(CONFIG_NDS32_BUILTIN_DTB)' '""'
BUILTIN_DTB := y
@@ -35,8 +32,12 @@ endif

ifdef CONFIG_CPU_LITTLE_ENDIAN
KBUILD_CFLAGS += $(call cc-option, -EL)
KBUILD_AFLAGS += $(call cc-option, -EL)
LDFLAGS += $(call cc-option, -EL)
else
KBUILD_CFLAGS += $(call cc-option, -EB)
KBUILD_AFLAGS += $(call cc-option, -EB)
LDFLAGS += $(call cc-option, -EB)
endif

boot := arch/nds32/boot
@@ -16,6 +16,7 @@ generic-y += dma.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
generic-y += export.h
generic-y += fb.h
generic-y += fcntl.h
generic-y += ftrace.h
@@ -49,6 +50,7 @@ generic-y += switch_to.h
generic-y += timex.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += xor.h
generic-y += unaligned.h
generic-y += user.h
generic-y += vga.h
@@ -336,7 +336,7 @@
#define INT_MASK_mskIDIVZE ( 0x1 << INT_MASK_offIDIVZE )
#define INT_MASK_mskDSSIM ( 0x1 << INT_MASK_offDSSIM )

#define INT_MASK_INITAIAL_VAL 0x10003
#define INT_MASK_INITAIAL_VAL (INT_MASK_mskDSSIM|INT_MASK_mskIDIVZE)

/******************************************************************************
* ir15: INT_PEND (Interrupt Pending Register)
@@ -396,6 +396,7 @@
#define MMU_CTL_D8KB 1
#define MMU_CTL_UNA ( 0x1 << MMU_CTL_offUNA )

#define MMU_CTL_CACHEABLE_NON 0
#define MMU_CTL_CACHEABLE_WB 2
#define MMU_CTL_CACHEABLE_WT 3
@@ -32,6 +32,8 @@ void flush_anon_page(struct vm_area_struct *vma,

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
void flush_kernel_dcache_page(struct page *page);
void flush_kernel_vmap_range(void *addr, int size);
void invalidate_kernel_vmap_range(void *addr, int size);
void flush_icache_range(unsigned long start, unsigned long end);
void flush_icache_page(struct vm_area_struct *vma, struct page *page);
#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&(mapping)->i_pages)
@@ -4,6 +4,8 @@
#ifndef __ASM_NDS32_IO_H
#define __ASM_NDS32_IO_H

#include <linux/types.h>

extern void iounmap(volatile void __iomem *addr);
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
@@ -27,6 +27,9 @@ extern void copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma);
extern void clear_user_highpage(struct page *page, unsigned long vaddr);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
struct page *to);
void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
#define clear_user_highpage clear_user_highpage
#else
@@ -152,6 +152,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
#define PAGE_CACHE_L1 __pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE)
#define PAGE_MEMORY __pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD)
#define PAGE_KERNEL __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD)
#define PAGE_SHARED __pgprot(_PAGE_V | _PAGE_M_URW_KRW | _PAGE_D | _PAGE_CACHE_SHRD)
#define PAGE_DEVICE __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_G | _PAGE_C_DEV)
#endif /* __ASSEMBLY__ */
@@ -118,7 +118,7 @@ common_exception_handler:
/* interrupt */
2:
#ifdef CONFIG_TRACE_IRQFLAGS
jal arch_trace_hardirqs_off
jal trace_hardirqs_off
#endif
move $r0, $sp
sethi $lp, hi20(ret_from_intr)
@@ -57,14 +57,32 @@ _nodtb:
isb
mtsr $r4, $L1_PPTB ! load page table pointer\n"

/* set NTC0 cacheable/writeback, mutliple page size in use */
mfsr $r3, $MMU_CTL
li $r0, #~MMU_CTL_mskNTC0
and $r3, $r3, $r0
#ifdef CONFIG_ANDES_PAGE_SIZE_4KB
ori $r3, $r3, #(MMU_CTL_mskMPZIU|(MMU_CTL_CACHEABLE_WB << MMU_CTL_offNTC0))
#ifdef CONFIG_CPU_DCACHE_DISABLE
#define MMU_CTL_NTCC MMU_CTL_CACHEABLE_NON
#else
ori $r3, $r3, #(MMU_CTL_mskMPZIU|(MMU_CTL_CACHEABLE_WB << MMU_CTL_offNTC0)|MMU_CTL_D8KB)
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define MMU_CTL_NTCC MMU_CTL_CACHEABLE_WT
#else
#define MMU_CTL_NTCC MMU_CTL_CACHEABLE_WB
#endif
#endif

/* set NTC cacheability, mutliple page size in use */
mfsr $r3, $MMU_CTL
#if CONFIG_MEMORY_START >= 0xc0000000
ori $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC3)
#elif CONFIG_MEMORY_START >= 0x80000000
ori $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC2)
#elif CONFIG_MEMORY_START >= 0x40000000
ori $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC1)
#else
ori $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC0)
#endif

#ifdef CONFIG_ANDES_PAGE_SIZE_4KB
ori $r3, $r3, #(MMU_CTL_mskMPZIU)
#else
ori $r3, $r3, #(MMU_CTL_mskMPZIU|MMU_CTL_D8KB)
#endif
#ifdef CONFIG_HW_SUPPORT_UNALIGNMENT_ACCESS
li $r0, #MMU_CTL_UNA
@@ -293,6 +293,9 @@ void __init setup_arch(char **cmdline_p)
/* paging_init() sets up the MMU and marks all pages as reserved */
paging_init();

/* invalidate all TLB entries because the new mapping is created */
__nds32__tlbop_flua();

/* use generic way to parse */
parse_early_param();
@@ -9,6 +9,7 @@ void save_stack_trace(struct stack_trace *trace)
{
save_stack_trace_tsk(current, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
@@ -45,3 +46,4 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
fpn = (unsigned long *)fpp;
}
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
@@ -23,7 +23,7 @@
#include <asm/vdso_timer_info.h>
#include <asm/cache_info.h>
extern struct cache_info L1_cache_info[2];
extern char vdso_start, vdso_end;
extern char vdso_start[], vdso_end[];
static unsigned long vdso_pages __ro_after_init;
static unsigned long timer_mapping_base;

@@ -66,16 +66,16 @@ static int __init vdso_init(void)
int i;
struct page **vdso_pagelist;

if (memcmp(&vdso_start, "\177ELF", 4)) {
if (memcmp(vdso_start, "\177ELF", 4)) {
pr_err("vDSO is not a valid ELF object!\n");
return -EINVAL;
}
/* Creat a timer io mapping to get clock cycles counter */
get_timer_node_info();

vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data);
vdso_pages + 1, vdso_pages, vdso_start, 1L, vdso_data);

/* Allocate the vDSO pagelist */
vdso_pagelist = kcalloc(vdso_pages, sizeof(struct page *), GFP_KERNEL);
@@ -83,7 +83,7 @@ static int __init vdso_init(void)
return -ENOMEM;

for (i = 0; i < vdso_pages; i++)
vdso_pagelist[i] = virt_to_page(&vdso_start + i * PAGE_SIZE);
vdso_pagelist[i] = virt_to_page(vdso_start + i * PAGE_SIZE);
vdso_spec[1].pages = &vdso_pagelist[0];

return 0;
@@ -2,6 +2,7 @@
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/linkage.h>
#include <asm/export.h>
#include <asm/page.h>

.text
@@ -16,6 +17,7 @@ ENTRY(copy_page)
popm $r2, $r10
ret
ENDPROC(copy_page)
EXPORT_SYMBOL(copy_page)

ENTRY(clear_page)
pushm $r1, $r9
@@ -35,3 +37,4 @@ ENTRY(clear_page)
popm $r1, $r9
ret
ENDPROC(clear_page)
EXPORT_SYMBOL(clear_page)
@@ -19,7 +19,7 @@
#define RA(inst) (((inst) >> 15) & 0x1FUL)
#define RB(inst) (((inst) >> 10) & 0x1FUL)
#define SV(inst) (((inst) >> 8) & 0x3UL)
#define IMM(inst) (((inst) >> 0) & 0x3FFFUL)
#define IMM(inst) (((inst) >> 0) & 0x7FFFUL)

#define RA3(inst) (((inst) >> 3) & 0x7UL)
#define RT3(inst) (((inst) >> 6) & 0x7UL)
@@ -28,6 +28,9 @@
#define RA5(inst) (((inst) >> 0) & 0x1FUL)
#define RT4(inst) (((inst) >> 5) & 0xFUL)

#define GET_IMMSVAL(imm_value) \
(((imm_value >> 14) & 0x1) ? (imm_value - 0x8000) : imm_value)

#define __get8_data(val,addr,err) \
__asm__( \
"1: lbi.bi %1, [%2], #1\n" \
@@ -467,7 +470,7 @@ static inline int do_32(unsigned long inst, struct pt_regs *regs)
}

if (imm)
shift = IMM(inst) * len;
shift = GET_IMMSVAL(IMM(inst)) * len;
else
shift = *idx_to_addr(regs, RB(inst)) << SV(inst);

@@ -552,7 +555,7 @@ static struct ctl_table alignment_tbl[3] = {

static struct ctl_table nds32_sysctl_table[2] = {
{
.procname = "unaligned_acess",
.procname = "unaligned_access",
.mode = 0555,
.child = alignment_tbl},
{}
@@ -147,6 +147,25 @@ void flush_cache_vunmap(unsigned long start, unsigned long end)
cpu_icache_inval_all();
}

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
struct page *to)
{
cpu_dcache_wbinval_page((unsigned long)vaddr);
cpu_icache_inval_page((unsigned long)vaddr);
copy_page(vto, vfrom);
cpu_dcache_wbinval_page((unsigned long)vto);
cpu_icache_inval_page((unsigned long)vto);
}

void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
cpu_dcache_wbinval_page((unsigned long)vaddr);
cpu_icache_inval_page((unsigned long)vaddr);
clear_page(addr);
cpu_dcache_wbinval_page((unsigned long)addr);
cpu_icache_inval_page((unsigned long)addr);
}

void copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma)
{
@@ -156,11 +175,9 @@ void copy_user_highpage(struct page *to, struct page *from,
pto = page_to_phys(to);
pfrom = page_to_phys(from);

local_irq_save(flags);
if (aliasing(vaddr, (unsigned long)kfrom))
cpu_dcache_wb_page((unsigned long)kfrom);
if (aliasing(vaddr, (unsigned long)kto))
cpu_dcache_inval_page((unsigned long)kto);
local_irq_save(flags);
vto = kremap0(vaddr, pto);
vfrom = kremap1(vaddr, pfrom);
copy_page((void *)vto, (void *)vfrom);
@@ -198,21 +215,25 @@ void flush_dcache_page(struct page *page)
if (mapping && !mapping_mapped(mapping))
set_bit(PG_dcache_dirty, &page->flags);
else {
int i, pc;
unsigned long vto, kaddr, flags;
unsigned long kaddr, flags;

kaddr = (unsigned long)page_address(page);
cpu_dcache_wbinval_page(kaddr);
pc = CACHE_SET(DCACHE) * CACHE_LINE_SIZE(DCACHE) / PAGE_SIZE;
local_irq_save(flags);
for (i = 0; i < pc; i++) {
vto =
kremap0(kaddr + i * PAGE_SIZE, page_to_phys(page));
cpu_dcache_wbinval_page(vto);
kunmap01(vto);
cpu_dcache_wbinval_page(kaddr);
if (mapping) {
unsigned long vaddr, kto;

vaddr = page->index << PAGE_SHIFT;
if (aliasing(vaddr, kaddr)) {
kto = kremap0(vaddr, page_to_phys(page));
cpu_dcache_wbinval_page(kto);
kunmap01(kto);
}
}
local_irq_restore(flags);
}
}
EXPORT_SYMBOL(flush_dcache_page);

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, void *src, int len)
@@ -251,7 +272,7 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
void flush_anon_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr)
{
unsigned long flags;
unsigned long kaddr, flags, ktmp;
if (!PageAnon(page))
return;

@@ -261,7 +282,12 @@ void flush_anon_page(struct vm_area_struct *vma,
local_irq_save(flags);
if (vma->vm_flags & VM_EXEC)
cpu_icache_inval_page(vaddr & PAGE_MASK);
cpu_dcache_wbinval_page((unsigned long)page_address(page));
kaddr = (unsigned long)page_address(page);
if (aliasing(vaddr, kaddr)) {
ktmp = kremap0(vaddr, page_to_phys(page));
cpu_dcache_wbinval_page(ktmp);
kunmap01(ktmp);
}
local_irq_restore(flags);
}

@@ -272,6 +298,25 @@ void flush_kernel_dcache_page(struct page *page)
cpu_dcache_wbinval_page((unsigned long)page_address(page));
local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

void flush_kernel_vmap_range(void *addr, int size)
{
unsigned long flags;
local_irq_save(flags);
cpu_dcache_wb_range((unsigned long)addr, (unsigned long)addr + size);
local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *addr, int size)
{
unsigned long flags;
local_irq_save(flags);
cpu_dcache_inval_range((unsigned long)addr, (unsigned long)addr + size);
local_irq_restore(flags);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);

void flush_icache_range(unsigned long start, unsigned long end)
{
@@ -283,6 +328,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
cpu_cache_wbinval_range(start, end, 1);
local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_icache_range);

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
@@ -30,6 +30,7 @@ extern unsigned long phys_initrd_size;
* zero-initialized data and COW.
*/
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

static void __init zone_sizes_init(void)
{
@@ -96,6 +96,7 @@ struct kvmppc_vcore {
struct kvm_vcpu *runner;
struct kvm *kvm;
u64 tb_offset; /* guest timebase - host timebase */
u64 tb_offset_applied; /* timebase offset currently in force */
ulong lpcr;
u32 arch_compat;
ulong pcr;
@@ -562,6 +562,7 @@ int main(void)
OFFSET(VCORE_NAPPING_THREADS, kvmppc_vcore, napping_threads);
OFFSET(VCORE_KVM, kvmppc_vcore, kvm);
OFFSET(VCORE_TB_OFFSET, kvmppc_vcore, tb_offset);
OFFSET(VCORE_TB_OFFSET_APPL, kvmppc_vcore, tb_offset_applied);
OFFSET(VCORE_LPCR, kvmppc_vcore, lpcr);
OFFSET(VCORE_PCR, kvmppc_vcore, pcr);
OFFSET(VCORE_DPDES, kvmppc_vcore, dpdes);
@@ -28,6 +28,7 @@ _GLOBAL(__setup_cpu_power7)
beqlr
li r0,0
mtspr SPRN_LPID,r0
mtspr SPRN_PCR,r0
mfspr r3,SPRN_LPCR
li r4,(LPCR_LPES1 >> LPCR_LPES_SH)
bl __init_LPCR_ISA206
@@ -41,6 +42,7 @@ _GLOBAL(__restore_cpu_power7)
beqlr
li r0,0
mtspr SPRN_LPID,r0
mtspr SPRN_PCR,r0
mfspr r3,SPRN_LPCR
li r4,(LPCR_LPES1 >> LPCR_LPES_SH)
bl __init_LPCR_ISA206
@@ -57,6 +59,7 @@ _GLOBAL(__setup_cpu_power8)
beqlr
li r0,0
mtspr SPRN_LPID,r0
mtspr SPRN_PCR,r0
mfspr r3,SPRN_LPCR
ori r3, r3, LPCR_PECEDH
li r4,0 /* LPES = 0 */
@@ -78,6 +81,7 @@ _GLOBAL(__restore_cpu_power8)
beqlr
li r0,0
mtspr SPRN_LPID,r0
mtspr SPRN_PCR,r0
mfspr r3,SPRN_LPCR
ori r3, r3, LPCR_PECEDH
li r4,0 /* LPES = 0 */
@@ -99,6 +103,7 @@ _GLOBAL(__setup_cpu_power9)
mtspr SPRN_PSSCR,r0
mtspr SPRN_LPID,r0
mtspr SPRN_PID,r0
mtspr SPRN_PCR,r0
mfspr r3,SPRN_LPCR
LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
or r3, r3, r4
@@ -123,6 +128,7 @@ _GLOBAL(__restore_cpu_power9)
mtspr SPRN_PSSCR,r0
mtspr SPRN_LPID,r0
mtspr SPRN_PID,r0
mtspr SPRN_PCR,r0
mfspr r3,SPRN_LPCR
LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
or r3, r3, r4
@@ -101,6 +101,7 @@ static void __restore_cpu_cpufeatures(void)
if (hv_mode) {
mtspr(SPRN_LPID, 0);
mtspr(SPRN_HFSCR, system_registers.hfscr);
mtspr(SPRN_PCR, 0);
}
mtspr(SPRN_FSCR, system_registers.fscr);
@@ -162,7 +162,7 @@ static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG))
asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
: : "r" (addr), "r" (kvm->arch.lpid) : "memory");
asm volatile("ptesync": : :"memory");
asm volatile("eieio ; tlbsync ; ptesync": : :"memory");
}

static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned long addr)
@@ -173,7 +173,7 @@ static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned long addr)
/* RIC=1 PRS=0 R=1 IS=2 */
asm volatile(PPC_TLBIE_5(%0, %1, 1, 0, 1)
: : "r" (rb), "r" (kvm->arch.lpid) : "memory");
asm volatile("ptesync": : :"memory");
asm volatile("eieio ; tlbsync ; ptesync": : :"memory");
}

unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
@@ -584,7 +584,7 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,

ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
if (ptep && pte_present(*ptep)) {
old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_PRESENT, 0,
old = kvmppc_radix_update_pte(kvm, ptep, ~0UL, 0,
gpa, shift);
kvmppc_radix_tlbie_page(kvm, gpa, shift);
if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) {
@@ -2441,6 +2441,7 @@ static void init_vcore_to_run(struct kvmppc_vcore *vc)
vc->in_guest = 0;
vc->napping_threads = 0;
vc->conferring_threads = 0;
vc->tb_offset_applied = 0;
}

static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
@@ -692,6 +692,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
22: ld r8,VCORE_TB_OFFSET(r5)
cmpdi r8,0
beq 37f
std r8, VCORE_TB_OFFSET_APPL(r5)
mftb r6 /* current host timebase */
add r8,r8,r6
mtspr SPRN_TBU40,r8 /* update upper 40 bits */
@@ -940,18 +941,6 @@ FTR_SECTION_ELSE
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
8:

/*
* Set the decrementer to the guest decrementer.
*/
ld r8,VCPU_DEC_EXPIRES(r4)
/* r8 is a host timebase value here, convert to guest TB */
ld r5,HSTATE_KVM_VCORE(r13)
ld r6,VCORE_TB_OFFSET(r5)
add r8,r8,r6
mftb r7
subf r3,r7,r8
mtspr SPRN_DEC,r3

ld r5, VCPU_SPRG0(r4)
ld r6, VCPU_SPRG1(r4)
ld r7, VCPU_SPRG2(r4)
@@ -1005,6 +994,18 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
mtspr SPRN_LPCR,r8
isync

/*
* Set the decrementer to the guest decrementer.
*/
ld r8,VCPU_DEC_EXPIRES(r4)
/* r8 is a host timebase value here, convert to guest TB */
ld r5,HSTATE_KVM_VCORE(r13)
ld r6,VCORE_TB_OFFSET_APPL(r5)
add r8,r8,r6
mftb r7
subf r3,r7,r8
mtspr SPRN_DEC,r3

/* Check if HDEC expires soon */
mfspr r3, SPRN_HDEC
EXTEND_HDEC(r3)
@@ -1597,8 +1598,27 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)

guest_bypass:
stw r12, STACK_SLOT_TRAP(r1)
mr r3, r12

/* Save DEC */
/* Do this before kvmhv_commence_exit so we know TB is guest TB */
ld r3, HSTATE_KVM_VCORE(r13)
mfspr r5,SPRN_DEC
mftb r6
/* On P9, if the guest has large decr enabled, don't sign extend */
BEGIN_FTR_SECTION
ld r4, VCORE_LPCR(r3)
andis. r4, r4, LPCR_LD@h
bne 16f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
extsw r5,r5
16: add r5,r5,r6
/* r5 is a guest timebase value here, convert to host TB */
ld r4,VCORE_TB_OFFSET_APPL(r3)
subf r5,r4,r5
std r5,VCPU_DEC_EXPIRES(r9)

/* Increment exit count, poke other threads to exit */
mr r3, r12
bl kvmhv_commence_exit
nop
ld r9, HSTATE_KVM_VCPU(r13)
@@ -1639,23 +1659,6 @@ guest_bypass:
mtspr SPRN_PURR,r3
mtspr SPRN_SPURR,r4

/* Save DEC */
ld r3, HSTATE_KVM_VCORE(r13)
mfspr r5,SPRN_DEC
mftb r6
/* On P9, if the guest has large decr enabled, don't sign extend */
BEGIN_FTR_SECTION
ld r4, VCORE_LPCR(r3)
andis. r4, r4, LPCR_LD@h
bne 16f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
extsw r5,r5
16: add r5,r5,r6
/* r5 is a guest timebase value here, convert to host TB */
ld r4,VCORE_TB_OFFSET(r3)
subf r5,r4,r5
std r5,VCPU_DEC_EXPIRES(r9)

BEGIN_FTR_SECTION
b 8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
@@ -1905,6 +1908,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
cmpwi cr2, r0, 0
beq cr2, 4f

/*
* Radix: do eieio; tlbsync; ptesync sequence in case we
* interrupted the guest between a tlbie and a ptesync.
*/
eieio
tlbsync
ptesync

/* Radix: Handle the case where the guest used an illegal PID */
LOAD_REG_ADDR(r4, mmu_base_pid)
lwz r3, VCPU_GUEST_PID(r9)
@@ -2017,9 +2028,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

27:
/* Subtract timebase offset from timebase */
ld r8,VCORE_TB_OFFSET(r5)
ld r8, VCORE_TB_OFFSET_APPL(r5)
cmpdi r8,0
beq 17f
li r0, 0
std r0, VCORE_TB_OFFSET_APPL(r5)
mftb r6 /* current guest timebase */
subf r8,r8,r6
mtspr SPRN_TBU40,r8 /* update upper 40 bits */
@@ -2700,7 +2713,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
add r3, r3, r5
ld r4, HSTATE_KVM_VCPU(r13)
ld r5, HSTATE_KVM_VCORE(r13)
ld r6, VCORE_TB_OFFSET(r5)
ld r6, VCORE_TB_OFFSET_APPL(r5)
subf r3, r6, r3 /* convert to host TB value */
std r3, VCPU_DEC_EXPIRES(r4)

@@ -2799,7 +2812,7 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
/* Restore guest decrementer */
ld r3, VCPU_DEC_EXPIRES(r4)
ld r5, HSTATE_KVM_VCORE(r13)
ld r6, VCORE_TB_OFFSET(r5)
ld r6, VCORE_TB_OFFSET_APPL(r5)
add r3, r3, r6 /* convert host TB to guest TB value */
mftb r7
subf r3, r7, r3
@@ -3606,12 +3619,9 @@ kvmppc_fix_pmao:
*/
kvmhv_start_timing:
ld r5, HSTATE_KVM_VCORE(r13)
lbz r6, VCORE_IN_GUEST(r5)
cmpwi r6, 0
beq 5f /* if in guest, need to */
ld r6, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
5: mftb r5
subf r5, r6, r5
ld r6, VCORE_TB_OFFSET_APPL(r5)
mftb r5
subf r5, r6, r5 /* subtract current timebase offset */
std r3, VCPU_CUR_ACTIVITY(r4)
std r5, VCPU_ACTIVITY_START(r4)
blr
@@ -3622,15 +3632,12 @@ kvmhv_start_timing:
*/
kvmhv_accumulate_time:
ld r5, HSTATE_KVM_VCORE(r13)
lbz r8, VCORE_IN_GUEST(r5)
cmpwi r8, 0
beq 4f /* if in guest, need to */
ld r8, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
4: ld r5, VCPU_CUR_ACTIVITY(r4)
ld r8, VCORE_TB_OFFSET_APPL(r5)
ld r5, VCPU_CUR_ACTIVITY(r4)
ld r6, VCPU_ACTIVITY_START(r4)
std r3, VCPU_CUR_ACTIVITY(r4)
mftb r7
subf r7, r8, r7
subf r7, r8, r7 /* subtract current timebase offset */
std r7, VCPU_ACTIVITY_START(r4)
cmpdi r5, 0
beqlr
@@ -11,6 +11,9 @@
#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)

/* Dummy interrupt used when taking interrupts out of a queue in H_CPPR */
#define XICS_DUMMY 1

static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
{
u8 cppr;
@@ -205,6 +208,10 @@ skip_ipi:
goto skip_ipi;
}

/* If it's the dummy interrupt, continue searching */
if (hirq == XICS_DUMMY)
goto skip_ipi;

/* If fetching, update queue pointers */
if (scan_type == scan_fetch) {
q->idx = idx;
@@ -385,9 +392,76 @@ static void GLUE(X_PFX,push_pending_to_hw)(struct kvmppc_xive_vcpu *xc)
__x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING);
}

static void GLUE(X_PFX,scan_for_rerouted_irqs)(struct kvmppc_xive *xive,
struct kvmppc_xive_vcpu *xc)
{
unsigned int prio;

/* For each priority that is now masked */
for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
struct xive_q *q = &xc->queues[prio];
struct kvmppc_xive_irq_state *state;
struct kvmppc_xive_src_block *sb;
u32 idx, toggle, entry, irq, hw_num;
struct xive_irq_data *xd;
__be32 *qpage;
u16 src;

idx = q->idx;
toggle = q->toggle;
qpage = READ_ONCE(q->qpage);
if (!qpage)
continue;

/* For each interrupt in the queue */
for (;;) {
entry = be32_to_cpup(qpage + idx);

/* No more ? */
if ((entry >> 31) == toggle)
break;
irq = entry & 0x7fffffff;

/* Skip dummies and IPIs */
if (irq == XICS_DUMMY || irq == XICS_IPI)
goto next;
sb = kvmppc_xive_find_source(xive, irq, &src);
if (!sb)
goto next;
state = &sb->irq_state[src];

/* Has it been rerouted ? */
if (xc->server_num == state->act_server)
goto next;

/*
* Allright, it *has* been re-routed, kill it from
* the queue.
*/
qpage[idx] = cpu_to_be32((entry & 0x80000000) | XICS_DUMMY);

/* Find the HW interrupt */
kvmppc_xive_select_irq(state, &hw_num, &xd);

/* If it's not an LSI, set PQ to 11 the EOI will force a resend */
if (!(xd->flags & XIVE_IRQ_FLAG_LSI))
GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_11);

/* EOI the source */
GLUE(X_PFX,source_eoi)(hw_num, xd);

next:
idx = (idx + 1) & q->msk;
if (idx == 0)
toggle ^= 1;
}
}
}

X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
{
struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
u8 old_cppr;

pr_devel("H_CPPR(cppr=%ld)\n", cppr);
@@ -407,14 +481,34 @@ X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
*/
smp_mb();

/*
* We are masking less, we need to look for pending things
* to deliver and set VP pending bits accordingly to trigger
* a new interrupt otherwise we might miss MFRR changes for
* which we have optimized out sending an IPI signal.
*/
if (cppr > old_cppr)
if (cppr > old_cppr) {
/*
* We are masking less, we need to look for pending things
* to deliver and set VP pending bits accordingly to trigger
* a new interrupt otherwise we might miss MFRR changes for
* which we have optimized out sending an IPI signal.
*/
GLUE(X_PFX,push_pending_to_hw)(xc);
} else {
/*
* We are masking more, we need to check the queue for any
* interrupt that has been routed to another CPU, take
* it out (replace it with the dummy) and retrigger it.
*
* This is necessary since those interrupts may otherwise
* never be processed, at least not until this CPU restores
* its CPPR.
*
* This is in theory racy vs. HW adding new interrupts to
* the queue. In practice this works because the interesting
* cases are when the guest has done a set_xive() to move the
* interrupt away, which flushes the xive, followed by the
* target CPU doing a H_CPPR. So any new interrupt coming into
* the queue must still be routed to us and isn't a source
* of concern.
*/
GLUE(X_PFX,scan_for_rerouted_irqs)(xive, xc);
}

/* Apply new CPPR */
xc->hw_cppr = cppr;
@@ -578,7 +578,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)

gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
if (gpa && (scb_s->ecb & ECB_TE)) {
if (!(gpa & ~0x1fffU)) {
if (!(gpa & ~0x1fffUL)) {
rc = set_validity_icpt(scb_s, 0x0080U);
goto unpin;
}
@@ -942,12 +942,8 @@ static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
{}
};

/* Only list CPUs which speculate but are non susceptible to SSB */
static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
@@ -955,14 +951,10 @@ static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
{ X86_VENDOR_CENTAUR, 5, },
{ X86_VENDOR_INTEL, 5, },
{ X86_VENDOR_NSC, 5, },
{ X86_VENDOR_AMD, 0x12, },
{ X86_VENDOR_AMD, 0x11, },
{ X86_VENDOR_AMD, 0x10, },
{ X86_VENDOR_AMD, 0xf, },
{ X86_VENDOR_ANY, 4, },
{}
};

@@ -970,6 +962,12 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
u64 ia32_cap = 0;

if (x86_match_cpu(cpu_no_speculation))
return;

setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
setup_force_cpu_bug(X86_BUG_SPECTRE_V2);

if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

@@ -977,12 +975,6 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
!(ia32_cap & ARCH_CAP_SSB_NO))
setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);

if (x86_match_cpu(cpu_no_speculation))
return;

setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
setup_force_cpu_bug(X86_BUG_SPECTRE_V2);

if (x86_match_cpu(cpu_no_meltdown))
return;
@@ -407,8 +407,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,

/* cpuid 7.0.edx*/
const u32 kvm_cpuid_7_0_edx_x86_features =
F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(SSBD) |
F(ARCH_CAPABILITIES);
F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES);

/* all calls to cpuid_count() should be made on the same cpu */
get_cpu();
@@ -495,6 +495,11 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
entry->ecx &= ~F(PKU);
entry->edx &= kvm_cpuid_7_0_edx_x86_features;
cpuid_mask(&entry->edx, CPUID_7_EDX);
/*
* We emulate ARCH_CAPABILITIES in software even
* if the host doesn't support it.
*/
entry->edx |= F(ARCH_CAPABILITIES);
} else {
entry->ebx = 0;
entry->ecx = 0;
@@ -1260,12 +1260,16 @@ static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
}
}

static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
{
kvm_hv_hypercall_set_result(vcpu, result);
++vcpu->stat.hypercalls;
return kvm_skip_emulated_instruction(vcpu);
}

static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;

kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result);
return kvm_skip_emulated_instruction(vcpu);
return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
}

static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
@@ -1350,7 +1354,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
/* Hypercall continuation is not supported yet */
if (rep_cnt || rep_idx) {
ret = HV_STATUS_INVALID_HYPERCALL_CODE;
goto set_result;
goto out;
}

switch (code) {
@@ -1381,9 +1385,8 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
break;
}

set_result:
kvm_hv_hypercall_set_result(vcpu, ret);
return 1;
out:
return kvm_hv_hypercall_complete(vcpu, ret);
}

void kvm_hv_init_vm(struct kvm *kvm)
@@ -1522,11 +1522,23 @@ static bool set_target_expiration(struct kvm_lapic *apic)

static void advance_periodic_target_expiration(struct kvm_lapic *apic)
{
apic->lapic_timer.tscdeadline +=
nsec_to_cycles(apic->vcpu, apic->lapic_timer.period);
ktime_t now = ktime_get();
u64 tscl = rdtsc();
ktime_t delta;

/*
* Synchronize both deadlines to the same time source or
* differences in the periods (caused by differences in the
* underlying clocks or numerical approximation errors) will
* cause the two to drift apart over time as the errors
* accumulate.
*/
apic->lapic_timer.target_expiration =
ktime_add_ns(apic->lapic_timer.target_expiration,
apic->lapic_timer.period);
delta = ktime_sub(apic->lapic_timer.target_expiration, now);
apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
nsec_to_cycles(apic->vcpu, delta);
}

static void start_sw_period(struct kvm_lapic *apic)
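A hedged toy model of the scheme the hunk above adopts (the struct, names, and units here are invented for illustration): the periodic deadline is advanced in a single time base, and the TSC deadline is re-derived from the remaining delta each period rather than advanced independently, so per-period conversion error cannot accumulate between the two clocks.

#include <stdint.h>

struct periodic_timer {
	int64_t  target_ns;     /* next expiry on the monotonic clock */
	uint64_t tsc_deadline;  /* same expiry expressed in TSC units */
	int64_t  period_ns;
};

static void advance_period(struct periodic_timer *t, int64_t now_ns,
			   uint64_t now_tsc, uint64_t tsc_khz)
{
	t->target_ns += t->period_ns;             /* one source of truth */
	int64_t delta_ns = t->target_ns - now_ns; /* time still to wait  */
	/* Re-derive the TSC deadline from the delta instead of adding a
	 * separately-converted period, so rounding cannot build up. */
	t->tsc_deadline = now_tsc + (delta_ns * tsc_khz) / 1000000;
}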
@@ -6671,11 +6671,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
	unsigned long nr, a0, a1, a2, a3, ret;
	int op_64_bit;

-	if (kvm_hv_hypercall_enabled(vcpu->kvm)) {
-		if (!kvm_hv_hypercall(vcpu))
-			return 0;
-		goto out;
-	}
+	if (kvm_hv_hypercall_enabled(vcpu->kvm))
+		return kvm_hv_hypercall(vcpu);

	nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
	a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
@@ -6696,7 +6693,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)

	if (kvm_x86_ops->get_cpl(vcpu) != 0) {
		ret = -KVM_EPERM;
-		goto out_error;
+		goto out;
	}

	switch (nr) {
@@ -6716,12 +6713,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
		ret = -KVM_ENOSYS;
		break;
	}
-out_error:
+out:
	if (!op_64_bit)
		ret = (u32)ret;
	kvm_register_write(vcpu, VCPU_REGS_RAX, ret);

-out:
	++vcpu->stat.hypercalls;
	return kvm_skip_emulated_instruction(vcpu);
 }
@@ -7980,6 +7976,7 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 {
	struct msr_data apic_base_msr;
	int mmu_reset_needed = 0;
+	int cpuid_update_needed = 0;
	int pending_vec, max_bits, idx;
	struct desc_ptr dt;
	int ret = -EINVAL;
@@ -8018,8 +8015,10 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
	vcpu->arch.cr0 = sregs->cr0;

	mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
+	cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) &
+				(X86_CR4_OSXSAVE | X86_CR4_PKE));
	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
-	if (sregs->cr4 & (X86_CR4_OSXSAVE | X86_CR4_PKE))
+	if (cpuid_update_needed)
		kvm_update_cpuid(vcpu);

	idx = srcu_read_lock(&vcpu->kvm->srcu);
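The XOR-and-mask test above only triggers a CPUID refresh when one of the CPUID-visible CR4 bits actually flips, rather than whenever the bit happens to be set. The arithmetic in miniature (bit positions are the architectural ones; the values are invented):

#include <stdio.h>

#define X86_CR4_OSXSAVE (1ul << 18)
#define X86_CR4_PKE     (1ul << 22)

int main(void)
{
	unsigned long old_cr4 = X86_CR4_OSXSAVE;
	unsigned long cases[2] = { X86_CR4_OSXSAVE, 0 }; /* unchanged, cleared */

	for (int i = 0; i < 2; i++) {
		/* Update needed only when a relevant bit changes state. */
		int needed = ((old_cr4 ^ cases[i]) &
			      (X86_CR4_OSXSAVE | X86_CR4_PKE)) != 0;
		printf("new_cr4=%#lx -> update needed: %d\n", cases[i], needed);
	}
	return 0;
}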
@@ -490,7 +490,8 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
	return 0;
 }

-int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages)
+int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages,
+		      bool check_nid)
 {
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long pfn;
@@ -514,7 +515,7 @@ int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages)

		mem_blk = find_memory_block_hinted(mem_sect, mem_blk);

-		ret = register_mem_sect_under_node(mem_blk, nid, true);
+		ret = register_mem_sect_under_node(mem_blk, nid, check_nid);
		if (!err)
			err = ret;
@@ -184,7 +184,7 @@ static void bcma_core_mips_print_irq(struct bcma_device *dev, unsigned int irq)
 {
	int i;
	static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"};
-	char interrupts[20];
+	char interrupts[25];
	char *ints = interrupts;

	for (i = 0; i < ARRAY_SIZE(irq_name); i++)
@@ -147,7 +147,7 @@ static u32 smc(u32 cmd_addr)
			"smc	#0	@ switch to secure world\n"
			: "=r" (r0)
			: "r" (r0), "r" (r1), "r" (r2)
-			: "r3");
+			: "r3", "r12");
	} while (r0 == QCOM_SCM_INTERRUPTED);

	return r0;
@@ -263,7 +263,7 @@ static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
			"smc	#0	@ switch to secure world\n"
			: "=r" (r0)
			: "r" (r0), "r" (r1), "r" (r2)
-			: "r3");
+			: "r3", "r12");
	return r0;
 }

@@ -298,7 +298,7 @@ static s32 qcom_scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
			"smc	#0	@ switch to secure world\n"
			: "=r" (r0)
			: "r" (r0), "r" (r1), "r" (r2), "r" (r3)
-			);
+			: "r12");
	return r0;
 }

@@ -328,7 +328,7 @@ u32 qcom_scm_get_version(void)
			"smc	#0	@ switch to secure world\n"
			: "=r" (r0), "=r" (r1)
			: "r" (r0), "r" (r1)
-			: "r2", "r3");
+			: "r2", "r3", "r12");
	} while (r0 == QCOM_SCM_INTERRUPTED);

	version = r1;
@@ -88,6 +88,9 @@ static int rcar_lvds_connector_atomic_check(struct drm_connector *connector,
	const struct drm_display_mode *panel_mode;
	struct drm_crtc_state *crtc_state;

+	if (!state->crtc)
+		return 0;
+
	if (list_empty(&connector->modes)) {
		dev_dbg(lvds->dev, "connector: empty modes list\n");
		return -EINVAL;
@@ -1278,8 +1278,6 @@ static void vmw_master_drop(struct drm_device *dev,
	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);
-
-	vmw_fb_refresh(dev_priv);
 }

 /**
@@ -1483,7 +1481,6 @@ static int vmw_pm_freeze(struct device *kdev)
		vmw_kms_resume(dev);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
-		vmw_fb_refresh(dev_priv);
		return -EBUSY;
	}

@@ -1523,8 +1520,6 @@ static int vmw_pm_restore(struct device *kdev)
	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);

-	vmw_fb_refresh(dev_priv);
-
	return 0;
 }
@@ -910,7 +910,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv);
 int vmw_fb_close(struct vmw_private *dev_priv);
 int vmw_fb_off(struct vmw_private *vmw_priv);
 int vmw_fb_on(struct vmw_private *vmw_priv);
-void vmw_fb_refresh(struct vmw_private *vmw_priv);

 /**
  * Kernel modesetting - vmwgfx_kms.c
@@ -866,21 +866,13 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = true;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

+	/*
+	 * Need to reschedule a dirty update, because otherwise that's
+	 * only done in dirty_mark() if the previous coalesced
+	 * dirty region was empty.
+	 */
+	schedule_delayed_work(&par->local_work, 0);
+
	return 0;
 }
-
-/**
- * vmw_fb_refresh - Refresh fb display
- *
- * @vmw_priv: Pointer to device private
- *
- * Call into kms to show the fbdev display(s).
- */
-void vmw_fb_refresh(struct vmw_private *vmw_priv)
-{
-	if (!vmw_priv->fb_info)
-		return;
-
-	vmw_fb_set_par(vmw_priv->fb_info);
-}
@@ -329,8 +329,6 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
	struct rpc_channel channel;
	char *msg, *reply = NULL;
	size_t reply_len = 0;
-	int ret = 0;
-

	if (!vmw_msg_enabled)
		return -ENODEV;
@@ -344,15 +342,14 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
		return -ENOMEM;
	}

-	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) ||
-	    vmw_send_msg(&channel, msg) ||
-	    vmw_recv_msg(&channel, (void *) &reply, &reply_len) ||
-	    vmw_close_channel(&channel)) {
-		DRM_ERROR("Failed to get %s", guest_info_param);
+	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
+		goto out_open;

-		ret = -EINVAL;
-	}
+	if (vmw_send_msg(&channel, msg) ||
+	    vmw_recv_msg(&channel, (void *) &reply, &reply_len))
+		goto out_msg;
+
+	vmw_close_channel(&channel);
	if (buffer && reply && reply_len > 0) {
		/* Remove reply code, which are the first 2 characters of
		 * the reply
@@ -369,7 +366,17 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
	kfree(reply);
	kfree(msg);

-	return ret;
+	return 0;
+
+out_msg:
+	vmw_close_channel(&channel);
+	kfree(reply);
+out_open:
+	*length = 0;
+	kfree(msg);
+	DRM_ERROR("Failed to get %s", guest_info_param);
+
+	return -EINVAL;
 }


@@ -400,15 +407,22 @@ int vmw_host_log(const char *log)
		return -ENOMEM;
	}

-	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) ||
-	    vmw_send_msg(&channel, msg) ||
-	    vmw_close_channel(&channel)) {
-		DRM_ERROR("Failed to send log\n");
+	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
+		goto out_open;

-		ret = -EINVAL;
-	}
+	if (vmw_send_msg(&channel, msg))
+		goto out_msg;
+
+	vmw_close_channel(&channel);
	kfree(msg);

-	return ret;
+	return 0;
+
+out_msg:
+	vmw_close_channel(&channel);
+out_open:
+	kfree(msg);
+	DRM_ERROR("Failed to send log\n");
+
+	return -EINVAL;
 }
@@ -135,17 +135,24 @@

 #else

-/* In the 32-bit version of this macro, we use "m" because there is no
- * more register left for bp
+/*
+ * In the 32-bit version of this macro, we store bp in a memory location
+ * because we've run out of registers.
+ * Now we can't reference that memory location while we've modified
+ * %esp or %ebp, so we first push it on the stack, just before we push
+ * %ebp, and then when we need it we read it from the stack where we
+ * just pushed it.
 */
 #define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di,	\
			port_num, magic, bp,		\
			eax, ebx, ecx, edx, si, di)	\
 ({							\
-	asm volatile ("push %%ebp;"			\
-		      "mov %12, %%ebp;"			\
+	asm volatile ("push %12;"			\
+		      "push %%ebp;"			\
+		      "mov 0x04(%%esp), %%ebp;"		\
		      "rep outsb;"			\
-		      "pop %%ebp;" :			\
+		      "pop %%ebp;"			\
+		      "add $0x04, %%esp;" :		\
		      "=a"(eax),			\
		      "=b"(ebx),			\
		      "=c"(ecx),			\
@@ -167,10 +174,12 @@
			port_num, magic, bp,		\
			eax, ebx, ecx, edx, si, di)	\
 ({							\
-	asm volatile ("push %%ebp;"			\
-		      "mov %12, %%ebp;"			\
+	asm volatile ("push %12;"			\
+		      "push %%ebp;"			\
+		      "mov 0x04(%%esp), %%ebp;"		\
		      "rep insb;"			\
-		      "pop %%ebp" :			\
+		      "pop %%ebp;"			\
+		      "add $0x04, %%esp;" :		\
		      "=a"(eax),			\
		      "=b"(ebx),			\
		      "=c"(ecx),			\
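The pushed-slot trick is easier to see in isolation. A standalone illustration (assuming a 32-bit x86 build, e.g. gcc -m32; the values and the "work" done with %ebp are invented, not the vmwgfx hypercall): the value that must survive while %ebp is repurposed is pushed first, recovered from the stack slot above the saved frame pointer, and the slot is dropped afterwards.

#include <stdio.h>

int main(void)
{
	unsigned int bp = 0x1234, out;

	asm volatile ("push %1;"                /* save bp on the stack   */
		      "push %%ebp;"             /* save the frame pointer */
		      "mov 0x04(%%esp), %%ebp;" /* load saved bp into %ebp */
		      "mov %%ebp, %0;"          /* stand-in for real work  */
		      "pop %%ebp;"              /* restore frame pointer   */
		      "add $0x04, %%esp;"       /* drop the saved bp slot  */
		      : "=r" (out) : "r" (bp));
	printf("%x\n", out);                    /* prints 1234 */
	return 0;
}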
@@ -388,10 +388,10 @@ void divasa_xdi_driver_unload(void)
 **  Receive and process command from user mode utility
 */
 void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
-			    int length,
+			    int length, void *mptr,
			    divas_xdi_copy_from_user_fn_t cp_fn)
 {
-	diva_xdi_um_cfg_cmd_t msg;
+	diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
	diva_os_xdi_adapter_t *a = NULL;
	diva_os_spin_lock_magic_t old_irql;
	struct list_head *tmp;
@@ -401,21 +401,21 @@ void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
		    length, sizeof(diva_xdi_um_cfg_cmd_t)))
		return NULL;
	}
-	if ((*cp_fn) (os_handle, &msg, src, sizeof(msg)) <= 0) {
+	if ((*cp_fn) (os_handle, msg, src, sizeof(*msg)) <= 0) {
		DBG_ERR(("A: A(?) open, write error"))
		return NULL;
	}
	diva_os_enter_spin_lock(&adapter_lock, &old_irql, "open_adapter");
	list_for_each(tmp, &adapter_queue) {
		a = list_entry(tmp, diva_os_xdi_adapter_t, link);
-		if (a->controller == (int)msg.adapter)
+		if (a->controller == (int)msg->adapter)
			break;
		a = NULL;
	}
	diva_os_leave_spin_lock(&adapter_lock, &old_irql, "open_adapter");

	if (!a) {
-		DBG_ERR(("A: A(%d) open, adapter not found", msg.adapter))
+		DBG_ERR(("A: A(%d) open, adapter not found", msg->adapter))
	}

	return (a);
@@ -437,8 +437,10 @@ void diva_xdi_close_adapter(void *adapter, void *os_handle)

 int
 diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
-	       int length, divas_xdi_copy_from_user_fn_t cp_fn)
+	       int length, void *mptr,
+	       divas_xdi_copy_from_user_fn_t cp_fn)
 {
+	diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
	diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter;
	void *data;

@@ -459,7 +461,13 @@ diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
		return (-2);
	}

-	length = (*cp_fn) (os_handle, data, src, length);
+	if (msg) {
+		*(diva_xdi_um_cfg_cmd_t *)data = *msg;
+		length = (*cp_fn) (os_handle, (char *)data + sizeof(*msg),
+				   src + sizeof(*msg), length - sizeof(*msg));
+	} else {
+		length = (*cp_fn) (os_handle, data, src, length);
+	}
	if (length > 0) {
		if ((*(a->interface.cmd_proc))
		    (a, (diva_xdi_um_cfg_cmd_t *) data, length)) {
@@ -20,10 +20,11 @@ int diva_xdi_read(void *adapter, void *os_handle, void __user *dst,
		  int max_length, divas_xdi_copy_to_user_fn_t cp_fn);

 int diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
-		   int length, divas_xdi_copy_from_user_fn_t cp_fn);
+		   int length, void *msg,
+		   divas_xdi_copy_from_user_fn_t cp_fn);

 void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
-			    int length,
+			    int length, void *msg,
			    divas_xdi_copy_from_user_fn_t cp_fn);

 void diva_xdi_close_adapter(void *adapter, void *os_handle);
@@ -591,19 +591,22 @@ static int divas_release(struct inode *inode, struct file *file)
 static ssize_t divas_write(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos)
 {
+	diva_xdi_um_cfg_cmd_t msg;
	int ret = -EINVAL;

	if (!file->private_data) {
		file->private_data = diva_xdi_open_adapter(file, buf,
-							   count,
+							   count, &msg,
							   xdi_copy_from_user);
-	}
-	if (!file->private_data) {
-		return (-ENODEV);
+		if (!file->private_data)
+			return (-ENODEV);
+		ret = diva_xdi_write(file->private_data, file,
+				     buf, count, &msg, xdi_copy_from_user);
+	} else {
+		ret = diva_xdi_write(file->private_data, file,
+				     buf, count, NULL, xdi_copy_from_user);
	}

-	ret = diva_xdi_write(file->private_data, file,
-			     buf, count, xdi_copy_from_user);
	switch (ret) {
	case -1:	/* Message should be removed from rx mailbox first */
		ret = -EBUSY;
@@ -622,11 +625,12 @@ static ssize_t divas_write(struct file *file, const char __user *buf,
 static ssize_t divas_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
 {
+	diva_xdi_um_cfg_cmd_t msg;
	int ret = -EINVAL;

	if (!file->private_data) {
		file->private_data = diva_xdi_open_adapter(file, buf,
-							   count,
+							   count, &msg,
							   xdi_copy_from_user);
	}
	if (!file->private_data) {
@@ -2485,7 +2485,7 @@ static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd,
		break;
	}

-	return 0;
+	return ret;
 }

 #ifdef CONFIG_COMPAT
@@ -33,6 +33,8 @@ struct sdhci_iproc_host {
	const struct sdhci_iproc_data *data;
	u32 shadow_cmd;
	u32 shadow_blk;
+	bool is_cmd_shadowed;
+	bool is_blk_shadowed;
 };

 #define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18)
@@ -48,8 +50,22 @@ static inline u32 sdhci_iproc_readl(struct sdhci_host *host, int reg)

 static u16 sdhci_iproc_readw(struct sdhci_host *host, int reg)
 {
-	u32 val = sdhci_iproc_readl(host, (reg & ~3));
-	u16 word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_iproc_host *iproc_host = sdhci_pltfm_priv(pltfm_host);
+	u32 val;
+	u16 word;
+
+	if ((reg == SDHCI_TRANSFER_MODE) && iproc_host->is_cmd_shadowed) {
+		/* Get the saved transfer mode */
+		val = iproc_host->shadow_cmd;
+	} else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
+		   iproc_host->is_blk_shadowed) {
+		/* Get the saved block info */
+		val = iproc_host->shadow_blk;
+	} else {
+		val = sdhci_iproc_readl(host, (reg & ~3));
+	}
+	word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
	return word;
 }

@@ -105,13 +121,15 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)

	if (reg == SDHCI_COMMAND) {
		/* Write the block now as we are issuing a command */
-		if (iproc_host->shadow_blk != 0) {
+		if (iproc_host->is_blk_shadowed) {
			sdhci_iproc_writel(host, iproc_host->shadow_blk,
				SDHCI_BLOCK_SIZE);
-			iproc_host->shadow_blk = 0;
+			iproc_host->is_blk_shadowed = false;
		}
		oldval = iproc_host->shadow_cmd;
-	} else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
+		iproc_host->is_cmd_shadowed = false;
+	} else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
+		   iproc_host->is_blk_shadowed) {
		/* Block size and count are stored in shadow reg */
		oldval = iproc_host->shadow_blk;
	} else {
@@ -123,9 +141,11 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)
	if (reg == SDHCI_TRANSFER_MODE) {
		/* Save the transfer mode until the command is issued */
		iproc_host->shadow_cmd = newval;
+		iproc_host->is_cmd_shadowed = true;
	} else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
		/* Save the block info until the command is issued */
		iproc_host->shadow_blk = newval;
+		iproc_host->is_blk_shadowed = true;
	} else {
		/* Command or other regular 32-bit write */
		sdhci_iproc_writel(host, newval, reg & ~3);
@@ -166,7 +186,7 @@ static const struct sdhci_ops sdhci_iproc_32only_ops = {

 static const struct sdhci_pltfm_data sdhci_iproc_cygnus_pltfm_data = {
	.quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
-	.quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
+	.quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN | SDHCI_QUIRK2_HOST_OFF_CARD_ON,
	.ops = &sdhci_iproc_32only_ops,
 };

@@ -206,7 +226,6 @@ static const struct sdhci_iproc_data iproc_data = {
	.caps1 = SDHCI_DRIVER_TYPE_C |
		 SDHCI_DRIVER_TYPE_D |
		 SDHCI_SUPPORT_DDR50,
-	.mmc_caps = MMC_CAP_1_8V_DDR,
 };

 static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = {
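For readers outside the driver: this controller only accepts 32-bit accesses, so 16-bit registers are emulated by read-modify-write on the containing word, and the shadow flags above track which halves hold deferred values. A self-contained sketch of just the word-merge arithmetic (plain memory standing in for MMIO, register offsets invented):

#include <stdint.h>
#include <stdio.h>

#define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18)

int main(void)
{
	uint32_t regfile[4] = { 0 };  /* stands in for 32-bit-only MMIO */
	int reg = 0x06;               /* a 16-bit register at byte offset 6 */
	uint16_t val = 0xBEEF;

	/* Write: read-modify-write the containing 32-bit word. */
	uint32_t old = regfile[(reg & ~3) / 4];
	uint32_t shift = REG_OFFSET_IN_BITS(reg);     /* 16 for offset 6 */
	uint32_t mask = 0xffff << shift;
	regfile[(reg & ~3) / 4] = (old & ~mask) | ((uint32_t)val << shift);

	/* Read: extract the 16-bit register back out of the word. */
	uint16_t word = regfile[(reg & ~3) / 4] >> shift & 0xffff;
	printf("0x%04x\n", word);     /* prints 0xbeef */
	return 0;
}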
@@ -1552,22 +1552,26 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
	if (!ioaddr) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_err("card has no PCI IO resources, aborting\n");
-		return -ENODEV;
+		err = -ENODEV;
+		goto err_disable_dev;
	}

	err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK);
	if (err) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_err("architecture does not support 32bit PCI busmaster DMA\n");
-		return err;
+		goto err_disable_dev;
	}
	if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_err("io address range already allocated\n");
-		return -EBUSY;
+		err = -EBUSY;
+		goto err_disable_dev;
	}

	err = pcnet32_probe1(ioaddr, 1, pdev);

+err_disable_dev:
+	if (err < 0)
+		pci_disable_device(pdev);
+
@@ -2747,11 +2747,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
-	 * limitation for the device. Try 64-bit first, and
+	 * limitation for the device. Try 47-bit first, and
	 * fail to 32-bit.
	 */

-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(47));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
@@ -2765,10 +2765,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
			goto err_out_release_regions;
		}
	} else {
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(47));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
-				"for consistent allocations, aborting\n", 64);
+				"for consistent allocations, aborting\n", 47);
			goto err_out_release_regions;
		}
		using_dac = 1;
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
  * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Fast Ethernet Controller (ENET) PTP driver for MX6x.
  *
  * Copyright (C) 2012 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -796,9 +796,11 @@ static int ibmvnic_login(struct net_device *netdev)
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	int retry_count = 0;
+	bool retry;
	int rc;

	do {
+		retry = false;
		if (retry_count > IBMVNIC_MAX_QUEUES) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -1;
@@ -822,6 +824,9 @@ static int ibmvnic_login(struct net_device *netdev)
			retry_count++;
			release_sub_crqs(adapter, 1);

+			retry = true;
+			netdev_dbg(netdev,
+				   "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
@@ -849,7 +854,7 @@ static int ibmvnic_login(struct net_device *netdev)
			netdev_warn(netdev, "Adapter login failed\n");
			return -1;
		}
-	} while (adapter->init_done_rc == PARTIALSUCCESS);
+	} while (retry);

	/* handle pending MAC address changes after successful login */
	if (adapter->mac_change_pending) {
@@ -2617,18 +2622,21 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
 {
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;
-	u64 val;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

-	val = (0xff000000) | scrq->hw_irq;
-	rc = plpar_hcall_norets(H_EOI, val);
-	if (rc)
-		dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
-			val, rc);
+	if (adapter->resetting &&
+	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
+		u64 val = (0xff000000) | scrq->hw_irq;
+
+		rc = plpar_hcall_norets(H_EOI, val);
+		if (rc)
+			dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
+				val, rc);
+	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
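The explicit retry flag decouples "go around again" from init_done_rc, which the retry path itself resets before looping. A stripped-down sketch of the loop shape (do_login is a stand-in that reports partial success twice, not the ibmvnic protocol):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the login exchange: 1 = partial success, 0 = done. */
static int do_login(int attempt) { return attempt < 2; }

int main(void)
{
	const int max_retries = 5;
	int retry_count = 0;
	bool retry;

	do {
		retry = false;
		if (retry_count > max_retries) {
			fprintf(stderr, "login attempts exceeded\n");
			return 1;
		}
		if (do_login(retry_count)) {
			/* Partial success: rebuild state and go again.
			 * The flag survives that state being reset. */
			retry_count++;
			retry = true;
		}
	} while (retry);

	printf("logged in after %d retries\n", retry_count);
	return 0;
}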
@@ -43,12 +43,12 @@
 #include "fw.h"

 /*
- * We allocate in as big chunks as we can, up to a maximum of 256 KB
- * per chunk.
+ * We allocate in page size (default 4KB on many archs) chunks to avoid high
+ * order memory allocations in fragmented/high usage memory situation.
 */
 enum {
-	MLX4_ICM_ALLOC_SIZE	= 1 << 18,
-	MLX4_TABLE_CHUNK_SIZE	= 1 << 18
+	MLX4_ICM_ALLOC_SIZE	= PAGE_SIZE,
+	MLX4_TABLE_CHUNK_SIZE	= PAGE_SIZE,
 };

 static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
@@ -398,9 +398,11 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
	u64 size;

	obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
+	if (WARN_ON(!obj_per_chunk))
+		return -EINVAL;
	num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;

-	table->icm      = kcalloc(num_icm, sizeof(*table->icm), GFP_KERNEL);
+	table->icm      = kvzalloc(num_icm * sizeof(*table->icm), GFP_KERNEL);
	if (!table->icm)
		return -ENOMEM;
	table->virt     = virt;
@@ -446,7 +448,7 @@ err:
			mlx4_free_icm(dev, table->icm[i], use_coherent);
	}

-	kfree(table->icm);
+	kvfree(table->icm);

	return -ENOMEM;
 }
@@ -462,5 +464,5 @@ void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
			mlx4_free_icm(dev, table->icm[i], table->coherent);
	}

-	kfree(table->icm);
+	kvfree(table->icm);
 }
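Shrinking the chunk size makes the per-table bookkeeping array much larger, which is why the allocation moves to kvzalloc/kvfree. A quick check of the chunk arithmetic with assumed sizes (the page and object sizes below are illustrative, not taken from the firmware):

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;   /* assumed PAGE_SIZE */
	unsigned long obj_size = 128;     /* hypothetical ICM object size */
	unsigned long nobj = 100000;

	unsigned long obj_per_chunk = page_size / obj_size;
	/* Guarded in the patch: an object bigger than a chunk yields 0. */
	if (!obj_per_chunk)
		return 1;
	/* Ceiling division, as in mlx4_init_icm_table(). */
	unsigned long num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;
	printf("%lu objects -> %lu page-sized chunks\n", nobj, num_icm);
	return 0;
}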
@@ -172,7 +172,7 @@ int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
		list_add_tail(&dev_ctx->list, &priv->ctx_list);
		spin_unlock_irqrestore(&priv->ctx_lock, flags);

-		mlx4_dbg(dev, "Inrerface for protocol %d restarted with when bonded mode is %s\n",
+		mlx4_dbg(dev, "Interface for protocol %d restarted with bonded mode %s\n",
			 dev_ctx->intf->protocol, enable ?
			 "enabled" : "disabled");
	}
@@ -393,11 +393,11 @@ struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	struct mlx4_qp *qp;

-	spin_lock(&qp_table->lock);
+	spin_lock_irq(&qp_table->lock);

	qp = __mlx4_qp_lookup(dev, qpn);

-	spin_unlock(&qp_table->lock);
+	spin_unlock_irq(&qp_table->lock);
	return qp;
 }
@@ -615,6 +615,45 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth)
	return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
 }

+static __be32 mlx5e_get_fcs(struct sk_buff *skb)
+{
+	int last_frag_sz, bytes_in_prev, nr_frags;
+	u8 *fcs_p1, *fcs_p2;
+	skb_frag_t *last_frag;
+	__be32 fcs_bytes;
+
+	if (!skb_is_nonlinear(skb))
+		return *(__be32 *)(skb->data + skb->len - ETH_FCS_LEN);
+
+	nr_frags = skb_shinfo(skb)->nr_frags;
+	last_frag = &skb_shinfo(skb)->frags[nr_frags - 1];
+	last_frag_sz = skb_frag_size(last_frag);
+
+	/* If all FCS data is in last frag */
+	if (last_frag_sz >= ETH_FCS_LEN)
+		return *(__be32 *)(skb_frag_address(last_frag) +
+				   last_frag_sz - ETH_FCS_LEN);
+
+	fcs_p2 = (u8 *)skb_frag_address(last_frag);
+	bytes_in_prev = ETH_FCS_LEN - last_frag_sz;
+
+	/* Find where the other part of the FCS is - Linear or another frag */
+	if (nr_frags == 1) {
+		fcs_p1 = skb_tail_pointer(skb);
+	} else {
+		skb_frag_t *prev_frag = &skb_shinfo(skb)->frags[nr_frags - 2];
+
+		fcs_p1 = skb_frag_address(prev_frag) +
+			 skb_frag_size(prev_frag);
+	}
+	fcs_p1 -= bytes_in_prev;
+
+	memcpy(&fcs_bytes, fcs_p1, bytes_in_prev);
+	memcpy(((u8 *)&fcs_bytes) + bytes_in_prev, fcs_p2, last_frag_sz);
+
+	return fcs_bytes;
+}
+
 static inline void mlx5e_handle_csum(struct net_device *netdev,
				     struct mlx5_cqe64 *cqe,
				     struct mlx5e_rq *rq,
@@ -643,6 +682,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
		skb->csum = csum_partial(skb->data + ETH_HLEN,
					 network_depth - ETH_HLEN,
					 skb->csum);
+		if (unlikely(netdev->features & NETIF_F_RXFCS))
+			skb->csum = csum_add(skb->csum,
+					     (__force __wsum)mlx5e_get_fcs(skb));
		rq->stats.csum_complete++;
		return;
	}
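The two-memcpy reassembly above is easiest to see with concrete bytes. A userspace sketch (buffer contents invented): three bytes of the four-byte FCS sit at the end of the previous fragment and one byte in the last fragment, and the copies stitch them back into one 32-bit value.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* A 4-byte FCS split across two buffers. */
	uint8_t prev_frag[] = { 0x00, 0x11, 0xAA, 0xBB, 0xCC };
	uint8_t last_frag[] = { 0xDD };
	int last_frag_sz = sizeof(last_frag);
	int bytes_in_prev = 4 - last_frag_sz;

	uint32_t fcs;
	uint8_t *fcs_p1 = prev_frag + sizeof(prev_frag) - bytes_in_prev;

	memcpy(&fcs, fcs_p1, bytes_in_prev);
	memcpy((uint8_t *)&fcs + bytes_in_prev, last_frag, last_frag_sz);
	printf("fcs = %02x %02x %02x %02x\n",   /* prints aa bb cc dd */
	       ((uint8_t *)&fcs)[0], ((uint8_t *)&fcs)[1],
	       ((uint8_t *)&fcs)[2], ((uint8_t *)&fcs)[3]);
	return 0;
}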
@@ -237,19 +237,17 @@ static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
	context->buf.sg[0].data = &context->command;

	spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
-	list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
+	res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
+	if (!res)
+		list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
	spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);

-	res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
	if (res) {
-		mlx5_fpga_warn(fdev, "Failure sending IPSec command: %d\n",
-			       res);
-		spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
-		list_del(&context->list);
-		spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
+		mlx5_fpga_warn(fdev, "Failed to send IPSec command: %d\n", res);
		kfree(context);
		return ERR_PTR(res);
	}

	/* Context will be freed by wait func after completion */
	return context;
 }
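The fix moves the send inside the lock and only publishes the entry on success, so a completion can never race with an entry whose send actually failed. A minimal pthread sketch of that ordering (send_msg and the pending array are stand-ins, not the driver's types):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;
static int pending[16], npending;

static int send_msg(int id) { return id < 0 ? -1 : 0; } /* stand-in */

static int submit(int id)
{
	int res;

	pthread_mutex_lock(&pending_lock);
	res = send_msg(id);
	if (!res)
		pending[npending++] = id;  /* published only if really sent */
	pthread_mutex_unlock(&pending_lock);

	if (res)
		fprintf(stderr, "failed to send %d: %d\n", id, res);
	return res;
}

int main(void)
{
	submit(1);
	submit(-1);                       /* fails, never enters the list */
	printf("pending: %d\n", npending); /* prints 1 */
	return 0;
}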
@@ -77,7 +77,7 @@
 #define ILT_CFG_REG(cli, reg)	PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET

 /* ILT entry structure */
-#define ILT_ENTRY_PHY_ADDR_MASK		0x000FFFFFFFFFFFULL
+#define ILT_ENTRY_PHY_ADDR_MASK		(~0ULL >> 12)
 #define ILT_ENTRY_PHY_ADDR_SHIFT	0
 #define ILT_ENTRY_VALID_MASK		0x1ULL
 #define ILT_ENTRY_VALID_SHIFT		52
@@ -61,17 +61,17 @@ static int bcm_cygnus_afe_config(struct phy_device *phydev)
		return rc;

	/* make rcal=100, since rdb default is 000 */
-	rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB1, 0x10);
+	rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB1, 0x10);
	if (rc < 0)
		return rc;

	/* CORE_EXPB0, Reset R_CAL/RC_CAL Engine */
-	rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x10);
+	rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x10);
	if (rc < 0)
		return rc;

	/* CORE_EXPB0, Disable Reset R_CAL/RC_CAL Engine */
-	rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x00);
+	rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x00);

	return 0;
 }
@@ -56,7 +56,7 @@ int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum)
	/* The register must be written to both the Shadow Register Select and
	 * the Shadow Read Register Selector
	 */
-	phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum |
+	phy_write(phydev, MII_BCM54XX_AUX_CTL, MII_BCM54XX_AUXCTL_SHDWSEL_MASK |
		  regnum << MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT);
	return phy_read(phydev, MII_BCM54XX_AUX_CTL);
 }
@@ -14,11 +14,18 @@
 #ifndef _LINUX_BCM_PHY_LIB_H
 #define _LINUX_BCM_PHY_LIB_H

+#include <linux/brcmphy.h>
 #include <linux/phy.h>

 int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val);
 int bcm_phy_read_exp(struct phy_device *phydev, u16 reg);

+static inline int bcm_phy_write_exp_sel(struct phy_device *phydev,
+					u16 reg, u16 val)
+{
+	return bcm_phy_write_exp(phydev, reg | MII_BCM54XX_EXP_SEL_ER, val);
+}
+
 int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val);
 int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum);
@@ -65,10 +65,10 @@ struct bcm7xxx_phy_priv {
 static void r_rc_cal_reset(struct phy_device *phydev)
 {
	/* Reset R_CAL/RC_CAL Engine */
-	bcm_phy_write_exp(phydev, 0x00b0, 0x0010);
+	bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0010);

	/* Disable Reset R_CAL/RC_CAL Engine */
-	bcm_phy_write_exp(phydev, 0x00b0, 0x0000);
+	bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0000);
 }

 static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev)
@@ -605,30 +605,13 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)

	if (cmd == PPPIOCDETACH) {
		/*
-		 * We have to be careful here... if the file descriptor
-		 * has been dup'd, we could have another process in the
-		 * middle of a poll using the same file *, so we had
-		 * better not free the interface data structures -
-		 * instead we fail the ioctl. Even in this case, we
-		 * shut down the interface if we are the owner of it.
-		 * Actually, we should get rid of PPPIOCDETACH, userland
-		 * (i.e. pppd) could achieve the same effect by closing
-		 * this fd and reopening /dev/ppp.
+		 * PPPIOCDETACH is no longer supported as it was heavily broken,
+		 * and is only known to have been used by pppd older than
+		 * ppp-2.4.2 (released November 2003).
		 */
+		pr_warn_once("%s (%d) used obsolete PPPIOCDETACH ioctl\n",
+			     current->comm, current->pid);
		err = -EINVAL;
-		if (pf->kind == INTERFACE) {
-			ppp = PF_TO_PPP(pf);
-			rtnl_lock();
-			if (file == ppp->owner)
-				unregister_netdevice(ppp->dev);
-			rtnl_unlock();
-		}
-		if (atomic_long_read(&file->f_count) < 2) {
-			ppp_release(NULL, file);
-			err = 0;
-		} else
-			pr_warn("PPPIOCDETACH file->f_count=%ld\n",
-				atomic_long_read(&file->f_count));
		goto out;
	}
@@ -1423,6 +1423,13 @@ static void tun_net_init(struct net_device *dev)
	dev->max_mtu = MAX_MTU - dev->hard_header_len;
 }

+static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
+{
+	struct sock *sk = tfile->socket.sk;
+
+	return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
+}
+
 /* Character device part */

 /* Poll */
@@ -1445,10 +1452,14 @@ static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
	if (!ptr_ring_empty(&tfile->tx_ring))
		mask |= EPOLLIN | EPOLLRDNORM;

-	if (tun->dev->flags & IFF_UP &&
-	    (sock_writeable(sk) ||
-	     (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
-	      sock_writeable(sk))))
+	/* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
+	 * guarantee EPOLLOUT to be raised by either here or
+	 * tun_sock_write_space(). Then process could get notification
+	 * after it writes to a down device and meets -EIO.
+	 */
+	if (tun_sock_writeable(tun, tfile) ||
+	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
+	     tun_sock_writeable(tun, tfile)))
		mask |= EPOLLOUT | EPOLLWRNORM;

	if (tun->dev->reg_state != NETREG_REGISTERED)
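The poll path above uses the classic check, set-flag, re-check sequence so a waker that frees space between the two steps can never be missed. A compact C11 sketch of the pattern (an atomic_flag and a counter standing in for SOCKWQ_ASYNC_NOSPACE and socket buffer space):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag nospace = ATOMIC_FLAG_INIT;
static _Atomic int free_slots = 0;   /* stand-in for writeable state */

static bool writeable(void) { return atomic_load(&free_slots) > 0; }

/* True if EPOLLOUT may be reported now.  The re-check after setting
 * the flag closes the race where space frees up between the first
 * check and the flag write. */
static bool poll_out(void)
{
	if (writeable())
		return true;
	if (!atomic_flag_test_and_set(&nospace) && writeable())
		return true;
	return false;
}

int main(void)
{
	printf("%d\n", poll_out());    /* 0: not writeable, flag now set */
	atomic_store(&free_slots, 1);  /* a writer frees space */
	printf("%d\n", poll_out());    /* 1 */
	return 0;
}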
@@ -707,6 +707,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
		void *data;
		u32 act;

+		/* Transient failure which in theory could occur if
+		 * in-flight packets from before XDP was enabled reach
+		 * the receive path after XDP is loaded.
+		 */
+		if (unlikely(hdr->hdr.gso_type))
+			goto err_xdp;
+
		/* This happens when rx buffer size is underestimated
		 * or headroom is not enough because of the buffer
		 * was refilled before XDP is set. This should only
@@ -727,14 +734,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
			xdp_page = page;
		}

-		/* Transient failure which in theory could occur if
-		 * in-flight packets from before XDP was enabled reach
-		 * the receive path after XDP is loaded. In practice I
-		 * was not able to create this condition.
-		 */
-		if (unlikely(hdr->hdr.gso_type))
-			goto err_xdp;
-
		/* Allow consuming headroom but reserve enough space to push
		 * the descriptor on if we get an XDP_TX return code.
		 */
@@ -775,7 +774,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
			}
			*xdp_xmit = true;
			if (unlikely(xdp_page != page))
-				goto err_xdp;
+				put_page(page);
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
@@ -787,7 +786,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
			}
			*xdp_xmit = true;
			if (unlikely(xdp_page != page))
-				goto err_xdp;
+				put_page(page);
			rcu_read_unlock();
			goto xdp_xmit;
		default:
@@ -875,7 +874,7 @@ err_xdp:
	rcu_read_unlock();
 err_skb:
	put_page(page);
-	while (--num_buf) {
+	while (num_buf-- > 1) {
		buf = virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers missing\n",
@@ -3340,7 +3340,7 @@ out_err:
 static int hwsim_dump_radio_nl(struct sk_buff *skb,
			       struct netlink_callback *cb)
 {
-	int last_idx = cb->args[0];
+	int last_idx = cb->args[0] - 1;
	struct mac80211_hwsim_data *data = NULL;
	int res = 0;
	void *hdr;
@@ -3368,7 +3368,7 @@ static int hwsim_dump_radio_nl(struct sk_buff *skb,
		last_idx = data->idx;
	}

-	cb->args[0] = last_idx;
+	cb->args[0] = last_idx + 1;

	/* list changed, but no new element sent, set interrupted flag */
	if (skb->len == 0 && cb->prev_seq && cb->seq != cb->prev_seq) {
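The +1/-1 pair implements a biased resume token: a stored value of zero means "start from the beginning", so a radio with index 0 can no longer be skipped on the first continuation. The idea in miniature (plain integers standing in for the netlink callback state):

#include <stdio.h>

int main(void)
{
	long args0 = 0;              /* fresh dump: no token yet */
	int last_idx = args0 - 1;    /* -1: nothing sent so far */

	for (int idx = last_idx + 1; idx < 5; idx++)
		last_idx = idx;      /* pretend we emitted radio idx */

	args0 = last_idx + 1;        /* store the biased resume token */
	printf("resume token %ld (next dump starts after idx %d)\n",
	       args0, last_idx);
	return 0;
}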
@@ -117,7 +117,7 @@ config SSB_SERIAL

 config SSB_DRIVER_PCICORE_POSSIBLE
	bool
-	depends on SSB_PCIHOST && SSB = y
+	depends on SSB_PCIHOST
	default y

 config SSB_DRIVER_PCICORE
@@ -131,7 +131,7 @@ config SSB_DRIVER_PCICORE

 config SSB_PCICORE_HOSTMODE
	bool "Hostmode support for SSB PCI core"
-	depends on SSB_DRIVER_PCICORE && SSB_DRIVER_MIPS
+	depends on SSB_DRIVER_PCICORE && SSB_DRIVER_MIPS && SSB = y
	help
	  PCIcore hostmode operation (external PCI bus).
@@ -981,6 +981,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
 {
	int ret = 0;

+	mutex_lock(&dev->mutex);
	vhost_dev_lock_vqs(dev);
	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
@@ -1016,6 +1017,8 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
	}

	vhost_dev_unlock_vqs(dev);
+	mutex_unlock(&dev->mutex);
+
	return ret;
 }
 ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
@@ -570,16 +570,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
					   current_page, vec_len, vec_start);

		len = bio_add_page(bio, page, vec_len, vec_start);
-		if (len != vec_len) {
-			mlog(ML_ERROR, "Adding page[%d] to bio failed, "
-			     "page %p, len %d, vec_len %u, vec_start %u, "
-			     "bi_sector %llu\n", current_page, page, len,
-			     vec_len, vec_start,
-			     (unsigned long long)bio->bi_iter.bi_sector);
-			bio_put(bio);
-			bio = ERR_PTR(-EIO);
-			return bio;
-		}
+		if (len != vec_len) break;

		cs += vec_len / (PAGE_SIZE/spp);
		vec_start = 0;
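With the error return gone, a short bio_add_page() result now simply ends the fill loop and the shorter bio is used as-is. The shape of that loop, with a stand-in for bio_add_page() that stops accepting pages at a fixed capacity:

#include <stdio.h>

/* Stand-in for bio_add_page(): accepts pages up to a device limit,
 * then returns a short length, which the caller treats as "bio full". */
static int add_page(int *filled, int len)
{
	const int capacity = 3;
	if (*filled >= capacity)
		return 0;       /* short add: nothing taken */
	(*filled)++;
	return len;
}

int main(void)
{
	int filled = 0, pages = 0;

	for (int i = 0; i < 8; i++) {
		if (add_page(&filled, 4096) != 4096)
			break;  /* limit hit: submit what we have */
		pages++;
	}
	printf("submitting bio with %d pages\n", pages);
	return 0;
}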
@@ -709,11 +709,6 @@ void seq_put_decimal_ull_width(struct seq_file *m, const char *delimiter,
	if (m->count + width >= m->size)
		goto overflow;

-	if (num < 10) {
-		m->buf[m->count++] = num + '0';
-		return;
-	}
-
	len = num_to_str(m->buf + m->count, m->size - m->count, num, width);
	if (!len)
		goto overflow;
@@ -142,7 +142,7 @@ struct bpf_verifier_state_list {
 struct bpf_insn_aux_data {
	union {
		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
-		struct bpf_map *map_ptr;	/* pointer for call insn into lookup_elem */
+		unsigned long map_state;	/* pointer/poison value for maps */
		s32 call_imm;			/* saved imm field of call insn */
	};
	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
@@ -464,7 +464,7 @@ static inline struct page *
 __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
 {
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
-	VM_WARN_ON(!node_online(nid));
+	VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid));

	return __alloc_pages(gfp_mask, order, nid);
 }
@@ -32,9 +32,11 @@ extern struct node *node_devices[];
 typedef void (*node_registration_func_t)(struct node *);

 #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA)
-extern int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages);
+extern int link_mem_sections(int nid, unsigned long start_pfn,
+			     unsigned long nr_pages, bool check_nid);
 #else
-static inline int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages)
+static inline int link_mem_sections(int nid, unsigned long start_pfn,
+				    unsigned long nr_pages, bool check_nid)
 {
	return 0;
 }
@@ -57,7 +59,7 @@ static inline int register_one_node(int nid)
		if (error)
			return error;
		/* link memory sections under this node */
-		error = link_mem_sections(nid, pgdat->node_start_pfn, pgdat->node_spanned_pages);
+		error = link_mem_sections(nid, pgdat->node_start_pfn, pgdat->node_spanned_pages, true);
	}

	return error;
@@ -103,6 +103,8 @@ void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
 /*
  * sctp/socket.c
  */
+int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr,
+		      int addr_len, int flags);
 int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
 int sctp_inet_listen(struct socket *sock, int backlog);
 void sctp_write_space(struct sock *sk);
@@ -435,7 +435,9 @@ TRACE_EVENT(sched_pi_setprio,
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->oldprio	= tsk->prio;
-		__entry->newprio	= pi_task ? pi_task->prio : tsk->prio;
+		__entry->newprio	= pi_task ?
+				min(tsk->normal_prio, pi_task->prio) :
+				tsk->normal_prio;
		/* XXX SCHED_DEADLINE bits missing */
	),
@@ -2698,7 +2698,7 @@ enum nl80211_attrs {
 #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS
 #define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS

-#define NL80211_WIPHY_NAME_MAXLEN		128
+#define NL80211_WIPHY_NAME_MAXLEN		64

 #define NL80211_MAX_SUPP_RATES			32
 #define NL80211_MAX_SUPP_HT_RATES		77
@@ -106,7 +106,7 @@ struct pppol2tp_ioc_stats {
 #define PPPIOCGIDLE	_IOR('t', 63, struct ppp_idle) /* get idle time */
 #define PPPIOCNEWUNIT	_IOWR('t', 62, int)	/* create new ppp unit */
 #define PPPIOCATTACH	_IOW('t', 61, int)	/* attach to ppp unit */
-#define PPPIOCDETACH	_IOW('t', 60, int)	/* detach from ppp unit/chan */
+#define PPPIOCDETACH	_IOW('t', 60, int)	/* obsolete, do not use */
 #define PPPIOCSMRRU	_IOW('t', 59, int)	/* set multilink MRU */
 #define PPPIOCCONNECT	_IOW('t', 58, int)	/* connect channel to unit */
 #define PPPIOCDISCONN	_IO('t', 57)		/* disconnect channel */
@@ -91,6 +91,7 @@
 #include <linux/cache.h>
 #include <linux/rodata_test.h>
+#include <linux/jump_label.h>
 #include <linux/mem_encrypt.h>

 #include <asm/io.h>
 #include <asm/bugs.h>
19	ipc/shm.c
@@ -1363,14 +1363,17 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,

	if (addr) {
		if (addr & (shmlba - 1)) {
-			/*
-			 * Round down to the nearest multiple of shmlba.
-			 * For sane do_mmap_pgoff() parameters, avoid
-			 * round downs that trigger nil-page and MAP_FIXED.
-			 */
-			if ((shmflg & SHM_RND) && addr >= shmlba)
-				addr &= ~(shmlba - 1);
-			else
+			if (shmflg & SHM_RND) {
+				addr &= ~(shmlba - 1);  /* round down */
+
+				/*
+				 * Ensure that the round-down is non-nil
+				 * when remapping. This can happen for
+				 * cases when addr < shmlba.
+				 */
+				if (!addr && (shmflg & SHM_REMAP))
+					goto out;
+			} else
 #ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
 #endif
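The new rule is: round down first, then refuse a round-down that lands on the nil page when remapping. A userspace sketch of the arithmetic (the SHMLBA value and addresses are assumed for illustration):

#include <stdio.h>

int main(void)
{
	unsigned long shmlba = 0x4000;            /* assumed SHMLBA */
	unsigned long addrs[] = { 0x12345, 0x3fff };

	for (int i = 0; i < 2; i++) {
		unsigned long addr = addrs[i] & ~(shmlba - 1); /* round down */
		/* A remap at address 0 would silently target the nil page,
		 * so the kernel now rejects that combination. */
		if (!addr /* && (shmflg & SHM_REMAP) */)
			printf("0x%lx: rejected (round-down hit zero)\n",
			       addrs[i]);
		else
			printf("0x%lx -> 0x%lx\n", addrs[i], addr);
	}
	return 0;
}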
Some files were not shown because too many files have changed in this diff.