Merge 4.2-rc6 into staging-next

We want the IIO and staging fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman 2015-08-10 09:07:25 -07:00
commit f70d631832
459 changed files with 6101 additions and 2708 deletions

View File

@@ -35,7 +35,7 @@ Example:
 		device_type = "dma";
 		reg = <0x0 0x1f270000 0x0 0x10000>,
 		      <0x0 0x1f200000 0x0 0x10000>,
-		      <0x0 0x1b008000 0x0 0x2000>,
+		      <0x0 0x1b000000 0x0 0x400000>,
 		      <0x0 0x1054a000 0x0 0x100>;
 		interrupts = <0x0 0x82 0x4>,
 			     <0x0 0xb8 0x4>,

View File

@@ -82,6 +82,9 @@ Optional properties:
 - id: If there are multiple instance of the same type, in order to
   differentiate between each instance "id" can be used (e.g., multi-lane PCIe
   PHY). If "id" is not provided, it is set to default value of '1'.
+- syscon-pllreset: Handle to system control region that contains the
+  CTRL_CORE_SMA_SW_0 register and register offset to the CTRL_CORE_SMA_SW_0
+  register that contains the SATA_PLL_SOFT_RESET bit. Only valid for sata_phy.
 
 This is usually a subnode of ocp2scp to which it is connected.
 
@@ -100,3 +103,16 @@ usb3phy@4a084400 {
 		      "sysclk",
 		      "refclk";
 };
+
+sata_phy: phy@4A096000 {
+	compatible = "ti,phy-pipe3-sata";
+	reg = <0x4A096000 0x80>, /* phy_rx */
+	      <0x4A096400 0x64>, /* phy_tx */
+	      <0x4A096800 0x40>; /* pll_ctrl */
+	reg-names = "phy_rx", "phy_tx", "pll_ctrl";
+	ctrl-module = <&omap_control_sata>;
+	clocks = <&sys_clkin1>, <&sata_ref_clk>;
+	clock-names = "sysclk", "refclk";
+	syscon-pllreset = <&scm_conf 0x3fc>;
+	#phy-cells = <0>;
+};

View File

@@ -3,11 +3,13 @@ MT8173 with MAX98090 CODEC
 Required properties:
 - compatible : "mediatek,mt8173-max98090"
 - mediatek,audio-codec: the phandle of the MAX98090 audio codec
+- mediatek,platform: the phandle of MT8173 ASoC platform
 
 Example:
 
 sound {
 	compatible = "mediatek,mt8173-max98090";
 	mediatek,audio-codec = <&max98090>;
+	mediatek,platform = <&afe>;
 };

View File

@@ -3,11 +3,13 @@ MT8173 with RT5650 RT5676 CODECS
 Required properties:
 - compatible : "mediatek,mt8173-rt5650-rt5676"
 - mediatek,audio-codec: the phandles of rt5650 and rt5676 codecs
+- mediatek,platform: the phandle of MT8173 ASoC platform
 
 Example:
 
 sound {
 	compatible = "mediatek,mt8173-rt5650-rt5676";
 	mediatek,audio-codec = <&rt5650 &rt5676>;
+	mediatek,platform = <&afe>;
 };

View File

@@ -3,7 +3,7 @@ Binding for Qualcomm Atheros AR7xxx/AR9xxx SPI controller
 Required properties:
 - compatible: has to be "qca,<soc-type>-spi", "qca,ar7100-spi" as fallback.
 - reg: Base address and size of the controllers memory area
-- clocks: phandle to the AHB clock.
+- clocks: phandle of the AHB clock.
 - clock-names: has to be "ahb".
 - #address-cells: <1>, as required by generic SPI binding.
 - #size-cells: <0>, also as required by generic SPI binding.
@@ -12,9 +12,9 @@ Child nodes as per the generic SPI binding.
 Example:
 
-	spi@1F000000 {
+	spi@1f000000 {
 		compatible = "qca,ar9132-spi", "qca,ar7100-spi";
-		reg = <0x1F000000 0x10>;
+		reg = <0x1f000000 0x10>;
 		clocks = <&pll 2>;
 		clock-names = "ahb";

View File

@@ -35,11 +35,11 @@ temp1_input		Local temperature (1/1000 degree,
 temp[2-9]_input		CPU temperatures (1/1000 degree,
 			0.125 degree resolution)
 
-fan[1-4]_mode		R/W, 0/1 for manual or SmartFan mode
+pwm[1-4]_enable		R/W, 1/2 for manual or SmartFan mode
 			Setting SmartFan mode is supported only if it has been
 			previously configured by BIOS (or configuration EEPROM)
 
-fan[1-4]_pwm		R/O in SmartFan mode, R/W in manual control mode
+pwm[1-4]		R/O in SmartFan mode, R/W in manual control mode
 
 The driver checks sensor control registers and does not export the sensors
 that are not enabled. Anyway, a sensor that is enabled may actually be not
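
A note on the renamed attributes: the values come straight from the table above (1 = manual, 2 = SmartFan; the pwm duty cycle uses the usual hwmon 0-255 range). A minimal userspace sketch, assuming the chip registers as hwmon0 (the index varies per system):

	#include <stdio.h>

	int main(void)
	{
		/* switch fan 1 to manual mode, then set its duty cycle */
		FILE *f = fopen("/sys/class/hwmon/hwmon0/pwm1_enable", "w");
		if (!f)
			return 1;
		fprintf(f, "1\n");	/* 1 = manual, 2 = SmartFan */
		fclose(f);

		f = fopen("/sys/class/hwmon/hwmon0/pwm1", "w");
		if (!f)
			return 1;
		fprintf(f, "128\n");	/* mid-scale duty cycle (0-255) */
		fclose(f);
		return 0;
	}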

View File

@@ -119,8 +119,10 @@ ALPS Absolute Mode - Protocol Version 2
  byte 5:    0   z6   z5   z4   z3   z2   z1   z0
 
 Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for
-the DualPoint Stick. For non interleaved dualpoint devices the pointingstick
-buttons get reported separately in the PSM, PSR and PSL bits.
+the DualPoint Stick. The M, R and L bits signal the combined status of both
+the pointingstick and touchpad buttons, except for Dell dualpoint devices
+where the pointingstick buttons get reported separately in the PSM, PSR
+and PSL bits.
 
 Dualpoint device -- interleaved packet format
 ---------------------------------------------

View File

@@ -199,7 +199,8 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 	buf += "#include <linux/string.h>\n"
 	buf += "#include <linux/configfs.h>\n"
 	buf += "#include <linux/ctype.h>\n"
-	buf += "#include <asm/unaligned.h>\n\n"
+	buf += "#include <asm/unaligned.h>\n"
+	buf += "#include <scsi/scsi_proto.h>\n\n"
 	buf += "#include <target/target_core_base.h>\n"
 	buf += "#include <target/target_core_fabric.h>\n"
 	buf += "#include <target/target_core_fabric_configfs.h>\n"
@@ -230,8 +231,14 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 	buf += "	}\n"
 	buf += "	tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
 	buf += "	tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
-	buf += "	ret = core_tpg_register(&" + fabric_mod_name + "_ops, wwn,\n"
-	buf += "				&tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
+
+	if proto_ident == "FC":
+		buf += "	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);\n"
+	elif proto_ident == "SAS":
+		buf += "	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
+	elif proto_ident == "iSCSI":
+		buf += "	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);\n"
+
 	buf += "	if (ret < 0) {\n"
 	buf += "		kfree(tpg);\n"
 	buf += "		return NULL;\n"
@@ -292,7 +299,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 
 	buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
 	buf += "	.module				= THIS_MODULE,\n"
-	buf += "	.name				= " + fabric_mod_name + ",\n"
+	buf += "	.name				= \"" + fabric_mod_name + "\",\n"
 	buf += "	.get_fabric_name		= " + fabric_mod_name + "_get_fabric_name,\n"
 	buf += "	.tpg_get_wwn			= " + fabric_mod_name + "_get_fabric_wwn,\n"
 	buf += "	.tpg_get_tag			= " + fabric_mod_name + "_get_tag,\n"
@@ -322,17 +329,17 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 	buf += "	.fabric_make_tpg		= " + fabric_mod_name + "_make_tpg,\n"
 	buf += "	.fabric_drop_tpg		= " + fabric_mod_name + "_drop_tpg,\n"
 	buf += "\n"
-	buf += "	.tfc_wwn_attrs			= " + fabric_mod_name + "_wwn_attrs;\n"
+	buf += "	.tfc_wwn_attrs			= " + fabric_mod_name + "_wwn_attrs,\n"
 	buf += "};\n\n"
 
 	buf += "static int __init " + fabric_mod_name + "_init(void)\n"
 	buf += "{\n"
-	buf += "	return target_register_template(" + fabric_mod_name + "_ops);\n"
+	buf += "	return target_register_template(&" + fabric_mod_name + "_ops);\n"
 	buf += "};\n\n"
 
 	buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
 	buf += "{\n"
-	buf += "	target_unregister_template(" + fabric_mod_name + "_ops);\n"
+	buf += "	target_unregister_template(&" + fabric_mod_name + "_ops);\n"
 	buf += "};\n\n"
 
 	buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"

View File

@@ -5600,6 +5600,7 @@ F:	kernel/irq/
 IRQCHIP DRIVERS
 M:	Thomas Gleixner <tglx@linutronix.de>
 M:	Jason Cooper <jason@lakedaemon.net>
+M:	Marc Zyngier <marc.zyngier@arm.com>
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
@@ -5608,11 +5609,14 @@ F:	Documentation/devicetree/bindings/interrupt-controller/
 F:	drivers/irqchip/
 
 IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
-M:	Benjamin Herrenschmidt <benh@kernel.crashing.org>
+M:	Jiang Liu <jiang.liu@linux.intel.com>
+M:	Marc Zyngier <marc.zyngier@arm.com>
 S:	Maintained
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
 F:	Documentation/IRQ-domain.txt
 F:	include/linux/irqdomain.h
 F:	kernel/irq/irqdomain.c
+F:	kernel/irq/msi.c
 
 ISAPNP
 M:	Jaroslav Kysela <perex@perex.cz>

View File

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
@@ -597,6 +597,11 @@ endif # $(dot-config)
 # Defaults to vmlinux, but the arch makefile usually adds further targets
 all: vmlinux
 
+# The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
+# values of the respective KBUILD_* variables
+ARCH_CPPFLAGS :=
+ARCH_AFLAGS :=
+ARCH_CFLAGS :=
 include arch/$(SRCARCH)/Makefile
 
 KBUILD_CFLAGS	+= $(call cc-option,-fno-delete-null-pointer-checks,)
@@ -848,10 +853,10 @@ export mod_strip_cmd
 mod_compress_cmd = true
 ifdef CONFIG_MODULE_COMPRESS
   ifdef CONFIG_MODULE_COMPRESS_GZIP
-    mod_compress_cmd = gzip -n
+    mod_compress_cmd = gzip -n -f
   endif # CONFIG_MODULE_COMPRESS_GZIP
   ifdef CONFIG_MODULE_COMPRESS_XZ
-    mod_compress_cmd = xz
+    mod_compress_cmd = xz -f
  endif # CONFIG_MODULE_COMPRESS_XZ
 endif # CONFIG_MODULE_COMPRESS
 export mod_compress_cmd

View File

@@ -313,11 +313,11 @@ config ARC_PAGE_SIZE_8K
 
 config ARC_PAGE_SIZE_16K
 	bool "16KB"
-	depends on ARC_MMU_V3
+	depends on ARC_MMU_V3 || ARC_MMU_V4
 
 config ARC_PAGE_SIZE_4K
 	bool "4KB"
-	depends on ARC_MMU_V3
+	depends on ARC_MMU_V3 || ARC_MMU_V4
 
 endchoice
 
@@ -365,6 +365,11 @@ config ARC_HAS_LLSC
 	default y
 	depends on !ARC_CANT_LLSC
 
+config ARC_STAR_9000923308
+	bool "Workaround for llock/scond livelock"
+	default y
+	depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC
+
 config ARC_HAS_SWAPE
 	bool "Insn: SWAPE (endian-swap)"
 	default y
@@ -379,6 +384,10 @@ config ARC_HAS_LL64
 	  dest operands with 2 possible source operands.
 	default y
 
+config ARC_HAS_DIV_REM
+	bool "Insn: div, divu, rem, remu"
+	default y
+
 config ARC_HAS_RTC
 	bool "Local 64-bit r/o cycle counter"
 	default n

View File

@@ -36,8 +36,16 @@ cflags-$(atleast_gcc44)			+= -fsection-anchors
 cflags-$(CONFIG_ARC_HAS_LLSC)		+= -mlock
 cflags-$(CONFIG_ARC_HAS_SWAPE)		+= -mswape
 
+ifdef CONFIG_ISA_ARCV2
+
 ifndef CONFIG_ARC_HAS_LL64
-cflags-$(CONFIG_ISA_ARCV2)		+= -mno-ll64
+cflags-y				+= -mno-ll64
+endif
+
+ifndef CONFIG_ARC_HAS_DIV_REM
+cflags-y				+= -mno-div-rem
+endif
+
 endif
 
 cflags-$(CONFIG_ARC_DW2_UNWIND)		+= -fasynchronous-unwind-tables

View File

@@ -89,11 +89,10 @@
 #define ECR_C_BIT_DTLB_LD_MISS		8
 #define ECR_C_BIT_DTLB_ST_MISS		9
 
 /* Auxiliary registers */
 #define AUX_IDENTITY		4
 #define AUX_INTR_VEC_BASE	0x25
+#define AUX_NON_VOL		0x5e
 
 /*
  * Floating Pt Registers
@@ -240,9 +239,9 @@ struct bcr_extn_xymem {
 
 struct bcr_perip {
 #ifdef CONFIG_CPU_BIG_ENDIAN
-	unsigned int start:8, pad2:8, sz:8, pad:8;
+	unsigned int start:8, pad2:8, sz:8, ver:8;
 #else
-	unsigned int pad:8, sz:8, pad2:8, start:8;
+	unsigned int ver:8, sz:8, pad2:8, start:8;
 #endif
 };

View File

@@ -23,33 +23,60 @@
 
 #define atomic_set(v, i) (((v)->counter) = (i))
 
-#ifdef CONFIG_ISA_ARCV2
-#define PREFETCHW	"	prefetchw   [%1]	\n"
-#else
-#define PREFETCHW
+#ifdef CONFIG_ARC_STAR_9000923308
+
+#define SCOND_FAIL_RETRY_VAR_DEF						\
+	unsigned int delay = 1, tmp;						\
+
+#define SCOND_FAIL_RETRY_ASM							\
+	"	bz	4f			\n"				\
+	"   ; --- scond fail delay ---		\n"				\
+	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */	\
+	"2: 	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */	\
+	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */		\
+	"	rol	%[delay], %[delay]	\n"	/* delay *= 2 */	\
+	"	b	1b			\n"	/* start over */	\
+	"4: ; --- success ---			\n"				\
+
+#define SCOND_FAIL_RETRY_VARS							\
+	  ,[delay] "+&r" (delay), [tmp] "=&r"	(tmp)				\
+
+#else	/* !CONFIG_ARC_STAR_9000923308 */
+
+#define SCOND_FAIL_RETRY_VAR_DEF
+
+#define SCOND_FAIL_RETRY_ASM							\
+	"	bnz	1b			\n"				\
+
+#define SCOND_FAIL_RETRY_VARS
+
 #endif
 
 #define ATOMIC_OP(op, c_op, asm_op)					\
 static inline void atomic_##op(int i, atomic_t *v)			\
 {									\
-	unsigned int temp;						\
+	unsigned int val;						\
+	SCOND_FAIL_RETRY_VAR_DEF					\
 									\
 	__asm__ __volatile__(						\
-	"1:				\n"				\
-	PREFETCHW							\
-	"	llock   %0, [%1]	\n"				\
-	"	" #asm_op " %0, %0, %2	\n"				\
-	"	scond   %0, [%1]	\n"				\
-	"	bnz     1b		\n"				\
-	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */	\
-	: "r"(&v->counter), "ir"(i)					\
+	"1:	llock   %[val], [%[ctr]]		\n"		\
+	"	" #asm_op " %[val], %[val], %[i]	\n"		\
+	"	scond   %[val], [%[ctr]]		\n"		\
+	"						\n"		\
+	SCOND_FAIL_RETRY_ASM						\
+									\
+	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
+	  SCOND_FAIL_RETRY_VARS						\
+	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
+	  [i]	"ir"	(i)						\
 	: "cc");							\
 }									\
 
 #define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
 static inline int atomic_##op##_return(int i, atomic_t *v)		\
 {									\
-	unsigned int temp;						\
+	unsigned int val;						\
+	SCOND_FAIL_RETRY_VAR_DEF					\
 									\
 	/*								\
 	 * Explicit full memory barrier needed before/after as		\
@@ -58,19 +85,21 @@ static inline int atomic_##op##_return(int i, atomic_t *v)		\
 	smp_mb();							\
 									\
 	__asm__ __volatile__(						\
-	"1:				\n"				\
-	PREFETCHW							\
-	"	llock   %0, [%1]	\n"				\
-	"	" #asm_op " %0, %0, %2	\n"				\
-	"	scond   %0, [%1]	\n"				\
-	"	bnz     1b		\n"				\
-	: "=&r"(temp)							\
-	: "r"(&v->counter), "ir"(i)					\
+	"1:	llock   %[val], [%[ctr]]		\n"		\
+	"	" #asm_op " %[val], %[val], %[i]	\n"		\
+	"	scond   %[val], [%[ctr]]		\n"		\
+	"						\n"		\
+	SCOND_FAIL_RETRY_ASM						\
+									\
+	: [val]	"=&r"	(val)						\
+	  SCOND_FAIL_RETRY_VARS						\
+	: [ctr]	"r"	(&v->counter),					\
+	  [i]	"ir"	(i)						\
 	: "cc");							\
 									\
 	smp_mb();							\
 									\
-	return temp;							\
+	return val;							\
 }
 
 #else	/* !CONFIG_ARC_HAS_LLSC */
 
@@ -150,6 +179,9 @@ ATOMIC_OP(and, &=, and)
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
+#undef SCOND_FAIL_RETRY_VAR_DEF
+#undef SCOND_FAIL_RETRY_ASM
+#undef SCOND_FAIL_RETRY_VARS
 
 /**
  * __atomic_add_unless - add unless the number is a given value
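
The named-operand asm above is the classic LL/SC read-modify-write retry loop. A rough, runnable C11 analogue of the same shape (illustrative only -- the kernel relies on the llock/scond instructions, not on compare-exchange):

	#include <stdatomic.h>

	/* Same retry shape as the LLSC ATOMIC_OP() above: load, modify,
	 * and retry until the conditional store wins the race. */
	static void atomic_add_sketch(int i, _Atomic unsigned int *ctr)
	{
		unsigned int old = atomic_load_explicit(ctr, memory_order_relaxed); /* ~llock */

		/* ~scond: succeeds only if *ctr still holds 'old';
		 * on failure 'old' is refreshed and we simply retry */
		while (!atomic_compare_exchange_weak(ctr, &old, old + i))
			;
	}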

View File

@@ -20,20 +20,20 @@
 struct pt_regs {
 
 	/* Real registers */
-	long bta;	/* bta_l1, bta_l2, erbta */
+	unsigned long bta;	/* bta_l1, bta_l2, erbta */
 
-	long lp_start, lp_end, lp_count;
+	unsigned long lp_start, lp_end, lp_count;
 
-	long status32;	/* status32_l1, status32_l2, erstatus */
-	long ret;	/* ilink1, ilink2 or eret */
-	long blink;
-	long fp;
-	long r26;	/* gp */
+	unsigned long status32;	/* status32_l1, status32_l2, erstatus */
+	unsigned long ret;	/* ilink1, ilink2 or eret */
+	unsigned long blink;
+	unsigned long fp;
+	unsigned long r26;	/* gp */
 
-	long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+	unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
 
-	long sp;	/* user/kernel sp depending on where we came from  */
-	long orig_r0;
+	unsigned long sp;	/* User/Kernel depending on where we came from */
+	unsigned long orig_r0;
 
 	/*
 	 * To distinguish bet excp, syscall, irq
@@ -55,13 +55,13 @@ struct pt_regs {
 		unsigned long event;
 	};
 
-	long user_r25;
+	unsigned long user_r25;
 };
 #else
 
 struct pt_regs {
 
-	long orig_r0;
+	unsigned long orig_r0;
 
 	union {
 		struct {
@@ -76,26 +76,26 @@ struct pt_regs {
 		unsigned long event;
 	};
 
-	long bta;	/* bta_l1, bta_l2, erbta */
-	long user_r25;
+	unsigned long bta;	/* bta_l1, bta_l2, erbta */
+	unsigned long user_r25;
 
-	long r26;	/* gp */
-	long fp;
-	long sp;	/* user/kernel sp depending on where we came from */
+	unsigned long r26;	/* gp */
+	unsigned long fp;
+	unsigned long sp;	/* user/kernel sp depending on where we came from */
 
-	long r12;
+	unsigned long r12;
 
 	/*------- Below list auto saved by h/w -----------*/
-	long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
+	unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
 
-	long blink;
-	long lp_end, lp_start, lp_count;
+	unsigned long blink;
+	unsigned long lp_end, lp_start, lp_count;
 
-	long ei, ldi, jli;
+	unsigned long ei, ldi, jli;
 
-	long ret;
-	long status32;
+	unsigned long ret;
+	unsigned long status32;
 };
 #endif
 
@@ -103,10 +103,10 @@ struct pt_regs {
 /* Callee saved registers - need to be saved only when you are scheduled out */
 
 struct callee_regs {
-	long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
+	unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
 };
 
-#define instruction_pointer(regs)	(unsigned long)((regs)->ret)
+#define instruction_pointer(regs)	((regs)->ret)
 #define profile_pc(regs)		instruction_pointer(regs)
 
 /* return 1 if user mode or 0 if kernel mode */
@@ -142,7 +142,7 @@ struct callee_regs {
 
 static inline long regs_return_value(struct pt_regs *regs)
 {
-	return regs->r0;
+	return (long)regs->r0;
 }
 
 #endif /* !__ASSEMBLY__ */

View File

@@ -18,9 +18,518 @@
 #define arch_spin_unlock_wait(x) \
 	do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
 
+#ifdef CONFIG_ARC_HAS_LLSC
+
+/*
+ * A normal LLOCK/SCOND based system, w/o need for livelock workaround
+ */
+#ifndef CONFIG_ARC_STAR_9000923308
+
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
-	unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+	unsigned int val;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+	"1:	llock	%[val], [%[slock]]	\n"
+	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
+	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
+	"	bnz	1b			\n"
+	"					\n"
+	: [val]		"=&r"	(val)
+	: [slock]	"r"	(&(lock->slock)),
+	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
+	: "memory", "cc");
+
+	smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+	unsigned int val, got_it = 0;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+	"1:	llock	%[val], [%[slock]]	\n"
+	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
+	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
+	"	bnz	1b			\n"
+	"	mov	%[got_it], 1		\n"
+	"4:					\n"
+	"					\n"
+	: [val]		"=&r"	(val),
+	  [got_it]	"+&r"	(got_it)
+	: [slock]	"r"	(&(lock->slock)),
+	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
+	: "memory", "cc");
+
+	smp_mb();
+
+	return got_it;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+	smp_mb();
+
+	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+	smp_mb();
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
+ */
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+	unsigned int val;
+
+	smp_mb();
+
+	/*
+	 * zero means writer holds the lock exclusively, deny Reader.
+	 * Otherwise grant lock to first/subseq reader
+	 *
+	 *	if (rw->counter > 0) {
+	 *		rw->counter--;
+	 *		ret = 1;
+	 *	}
+	 */
+
+	__asm__ __volatile__(
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
+	"	sub	%[val], %[val], 1	\n"	/* reader lock */
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bnz	1b			\n"
+	"					\n"
+	: [val]		"=&r"	(val)
+	: [rwlock]	"r"	(&(rw->counter)),
+	  [WR_LOCKED]	"ir"	(0)
+	: "memory", "cc");
+
+	smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+	unsigned int val, got_it = 0;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
+	"	sub	%[val], %[val], 1	\n"	/* counter-- */
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bnz	1b			\n"	/* retry if collided with someone */
+	"	mov	%[got_it], 1		\n"
+	"					\n"
+	"4: ; --- done ---			\n"
+
+	: [val]		"=&r"	(val),
+	  [got_it]	"+&r"	(got_it)
+	: [rwlock]	"r"	(&(rw->counter)),
+	  [WR_LOCKED]	"ir"	(0)
+	: "memory", "cc");
+
+	smp_mb();
+
+	return got_it;
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+	unsigned int val;
+
+	smp_mb();
+
+	/*
+	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+	 * deny writer. Otherwise if unlocked grant to writer
+	 * Hence the claim that Linux rwlocks are unfair to writers.
+	 * (can be starved for an indefinite time by readers).
+	 *
+	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+	 *		rw->counter = 0;
+	 *		ret = 1;
+	 *	}
+	 */
+
+	__asm__ __volatile__(
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
+	"	mov	%[val], %[WR_LOCKED]	\n"
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bnz	1b			\n"
+	"					\n"
+	: [val]		"=&r"	(val)
+	: [rwlock]	"r"	(&(rw->counter)),
+	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
+	  [WR_LOCKED]	"ir"	(0)
+	: "memory", "cc");
+
+	smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+	unsigned int val, got_it = 0;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
+	"	mov	%[val], %[WR_LOCKED]	\n"
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bnz	1b			\n"	/* retry if collided with someone */
+	"	mov	%[got_it], 1		\n"
+	"					\n"
+	"4: ; --- done ---			\n"
+
+	: [val]		"=&r"	(val),
+	  [got_it]	"+&r"	(got_it)
+	: [rwlock]	"r"	(&(rw->counter)),
+	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
+	  [WR_LOCKED]	"ir"	(0)
+	: "memory", "cc");
+
+	smp_mb();
+
+	return got_it;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+	unsigned int val;
+
+	smp_mb();
+
+	/*
+	 * rw->counter++;
+	 */
+	__asm__ __volatile__(
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	add	%[val], %[val], 1	\n"
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bnz	1b			\n"
+	"					\n"
+	: [val]		"=&r"	(val)
+	: [rwlock]	"r"	(&(rw->counter))
+	: "memory", "cc");
+
+	smp_mb();
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+	smp_mb();
+
+	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+
+	smp_mb();
+}
+
+#else	/* CONFIG_ARC_STAR_9000923308 */
+
+/*
+ * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
+ * coherency transactions in the SCU. The exclusive line state keeps rotating
+ * among contenting cores leading to a never ending cycle. So break the cycle
+ * by deferring the retry of failed exclusive access (SCOND). The actual delay
+ * needed is function of number of contending cores as well as the unrelated
+ * coherency traffic from other cores. To keep the code simple, start off with
+ * small delay of 1 which would suffice most cases and in case of contention
+ * double the delay. Eventually the delay is sufficient such that the coherency
+ * pipeline is drained, thus a subsequent exclusive access would succeed.
+ */
+
+#define SCOND_FAIL_RETRY_VAR_DEF						\
+	unsigned int delay, tmp;						\
+
+#define SCOND_FAIL_RETRY_ASM							\
+	"   ; --- scond fail delay ---		\n"				\
+	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */	\
+	"2: 	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */	\
+	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */		\
+	"	rol	%[delay], %[delay]	\n"	/* delay *= 2 */	\
+	"	b	1b			\n"	/* start over */	\
+	"					\n"				\
+	"4: ; --- done ---			\n"				\
+
+#define SCOND_FAIL_RETRY_VARS							\
+	  ,[delay] "=&r" (delay), [tmp] "=&r"	(tmp)				\
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+	unsigned int val;
+	SCOND_FAIL_RETRY_VAR_DEF;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+	"0:	mov	%[delay], 1		\n"
+	"1:	llock	%[val], [%[slock]]	\n"
+	"	breq	%[val], %[LOCKED], 0b	\n"	/* spin while LOCKED */
+	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
+	"	bz	4f			\n"	/* done */
+	"					\n"
+	SCOND_FAIL_RETRY_ASM
+
+	: [val]		"=&r"	(val)
+	  SCOND_FAIL_RETRY_VARS
+	: [slock]	"r"	(&(lock->slock)),
+	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
+	: "memory", "cc");
+
+	smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+	unsigned int val, got_it = 0;
+	SCOND_FAIL_RETRY_VAR_DEF;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+	"0:	mov	%[delay], 1		\n"
+	"1:	llock	%[val], [%[slock]]	\n"
+	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
+	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
+	"	bz.d	4f			\n"
+	"	mov.z	%[got_it], 1		\n"	/* got it */
+	"					\n"
+	SCOND_FAIL_RETRY_ASM
+
+	: [val]		"=&r"	(val),
+	  [got_it]	"+&r"	(got_it)
+	  SCOND_FAIL_RETRY_VARS
+	: [slock]	"r"	(&(lock->slock)),
+	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
+	: "memory", "cc");
+
+	smp_mb();
+
+	return got_it;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+	smp_mb();
+
+	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+	smp_mb();
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
+ */
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+	unsigned int val;
+	SCOND_FAIL_RETRY_VAR_DEF;
+
+	smp_mb();
+
+	/*
+	 * zero means writer holds the lock exclusively, deny Reader.
+	 * Otherwise grant lock to first/subseq reader
+	 *
+	 *	if (rw->counter > 0) {
+	 *		rw->counter--;
+	 *		ret = 1;
+	 *	}
+	 */
+
+	__asm__ __volatile__(
+	"0:	mov	%[delay], 1		\n"
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	brls	%[val], %[WR_LOCKED], 0b\n"	/* <= 0: spin while write locked */
+	"	sub	%[val], %[val], 1	\n"	/* reader lock */
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bz	4f			\n"	/* done */
+	"					\n"
+	SCOND_FAIL_RETRY_ASM
+
+	: [val]		"=&r"	(val)
+	  SCOND_FAIL_RETRY_VARS
+	: [rwlock]	"r"	(&(rw->counter)),
+	  [WR_LOCKED]	"ir"	(0)
+	: "memory", "cc");
+
+	smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+	unsigned int val, got_it = 0;
+	SCOND_FAIL_RETRY_VAR_DEF;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+	"0:	mov	%[delay], 1		\n"
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
+	"	sub	%[val], %[val], 1	\n"	/* counter-- */
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bz.d	4f			\n"
+	"	mov.z	%[got_it], 1		\n"	/* got it */
+	"					\n"
+	SCOND_FAIL_RETRY_ASM
+
+	: [val]		"=&r"	(val),
+	  [got_it]	"+&r"	(got_it)
+	  SCOND_FAIL_RETRY_VARS
+	: [rwlock]	"r"	(&(rw->counter)),
+	  [WR_LOCKED]	"ir"	(0)
+	: "memory", "cc");
+
+	smp_mb();
+
+	return got_it;
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+	unsigned int val;
+	SCOND_FAIL_RETRY_VAR_DEF;
+
+	smp_mb();
+
+	/*
+	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+	 * deny writer. Otherwise if unlocked grant to writer
+	 * Hence the claim that Linux rwlocks are unfair to writers.
+	 * (can be starved for an indefinite time by readers).
+	 *
+	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+	 *		rw->counter = 0;
+	 *		ret = 1;
+	 *	}
+	 */
+
+	__asm__ __volatile__(
+	"0:	mov	%[delay], 1		\n"
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	brne	%[val], %[UNLOCKED], 0b	\n"	/* while !UNLOCKED spin */
+	"	mov	%[val], %[WR_LOCKED]	\n"
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bz	4f			\n"
+	"					\n"
+	SCOND_FAIL_RETRY_ASM
+
+	: [val]		"=&r"	(val)
+	  SCOND_FAIL_RETRY_VARS
+	: [rwlock]	"r"	(&(rw->counter)),
+	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
+	  [WR_LOCKED]	"ir"	(0)
+	: "memory", "cc");
+
+	smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+	unsigned int val, got_it = 0;
+	SCOND_FAIL_RETRY_VAR_DEF;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+	"0:	mov	%[delay], 1		\n"
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
+	"	mov	%[val], %[WR_LOCKED]	\n"
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bz.d	4f			\n"
+	"	mov.z	%[got_it], 1		\n"	/* got it */
+	"					\n"
+	SCOND_FAIL_RETRY_ASM
+
+	: [val]		"=&r"	(val),
+	  [got_it]	"+&r"	(got_it)
+	  SCOND_FAIL_RETRY_VARS
+	: [rwlock]	"r"	(&(rw->counter)),
+	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
+	  [WR_LOCKED]	"ir"	(0)
+	: "memory", "cc");
+
+	smp_mb();
+
+	return got_it;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+	unsigned int val;
+
+	smp_mb();
+
+	/*
+	 * rw->counter++;
+	 */
+	__asm__ __volatile__(
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	add	%[val], %[val], 1	\n"
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bnz	1b			\n"
+	"					\n"
+	: [val]		"=&r"	(val)
+	: [rwlock]	"r"	(&(rw->counter))
+	: "memory", "cc");
+
+	smp_mb();
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+	unsigned int val;
+
+	smp_mb();
+
+	/*
+	 * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+	 */
+	__asm__ __volatile__(
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	scond	%[UNLOCKED], [%[rwlock]]\n"
+	"	bnz	1b			\n"
+	"					\n"
+	: [val]		"=&r"	(val)
+	: [rwlock]	"r"	(&(rw->counter)),
+	  [UNLOCKED]	"r"	(__ARCH_RW_LOCK_UNLOCKED__)
+	: "memory", "cc");
+
+	smp_mb();
+}
+
+#undef SCOND_FAIL_RETRY_VAR_DEF
+#undef SCOND_FAIL_RETRY_ASM
+#undef SCOND_FAIL_RETRY_VARS
+
+#endif	/* CONFIG_ARC_STAR_9000923308 */
+
+#else	/* !CONFIG_ARC_HAS_LLSC */
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
 
 	/*
 	 * This smp_mb() is technically superfluous, we only need the one
@@ -33,7 +542,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	__asm__ __volatile__(
 	"1:	ex  %0, [%1]		\n"
 	"	breq  %0, %2, 1b	\n"
-	: "+&r" (tmp)
+	: "+&r" (val)
 	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
 	: "memory");
 
@@ -48,26 +557,27 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	smp_mb();
 }
 
+/* 1 - lock taken successfully */
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
-	unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
 
 	smp_mb();
 
 	__asm__ __volatile__(
 	"1:	ex  %0, [%1]		\n"
-	: "+r" (tmp)
+	: "+r" (val)
 	: "r"(&(lock->slock))
 	: "memory");
 
 	smp_mb();
 
-	return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__);
+	return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
+	unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;
 
 	/*
 	 * RELEASE barrier: given the instructions avail on ARCv2, full barrier
@@ -77,7 +587,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 	__asm__ __volatile__(
 	"	ex  %0, [%1]		\n"
-	: "+r" (tmp)
+	: "+r" (val)
 	: "r"(&(lock->slock))
 	: "memory");
 
@@ -90,19 +600,12 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 
 /*
  * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
  *
  * The spinlock itself is contained in @counter and access to it is
  * serialized with @lock_mutex.
- *
- * Unfair locking as Writers could be starved indefinitely by Reader(s)
  */
 
-/* Would read_trylock() succeed? */
-#define arch_read_can_lock(x)	((x)->counter > 0)
-
-/* Would write_trylock() succeed? */
-#define arch_write_can_lock(x)	((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
-
 /* 1 - lock taken successfully */
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
@@ -173,6 +676,11 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 	arch_spin_unlock(&(rw->lock_mutex));
 }
 
+#endif
+
+#define arch_read_can_lock(x)	((x)->counter > 0)
+#define arch_write_can_lock(x)	((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
+
 #define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)
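
The exponential backoff in the STAR 9000923308 path is easier to follow in C. A runnable sketch of the same control flow, with a C11 compare-exchange standing in for the llock/scond pair (illustrative only, not the kernel code):

	#include <stdatomic.h>

	#define SLOCK_LOCKED 1u

	static void lock_with_backoff(atomic_uint *slock)
	{
		unsigned int delay = 1;

		for (;;) {
			unsigned int cur = atomic_load_explicit(slock, memory_order_relaxed);

			if (cur == SLOCK_LOCKED) {	/* "breq ... 0b": spin while LOCKED */
				delay = 1;		/* a new spin-wait cycle resets the delay */
				continue;
			}
			/* "scond": try to claim the lock word */
			if (atomic_compare_exchange_weak(slock, &cur, SLOCK_LOCKED))
				return;			/* "bz 4f" -- acquired */

			for (volatile unsigned int t = delay; t; t--)
				;			/* busy-wait 'delay' iterations */
			delay *= 2;			/* "rol": double on each scond failure */
		}
	}

Resetting the delay only when a fresh contention cycle starts, while doubling it on consecutive scond failures, is what eventually drains the coherency pipeline as the comment in the diff describes.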

View File

@@ -26,7 +26,9 @@ typedef struct {
  */
 typedef struct {
 	volatile unsigned int	counter;
+#ifndef CONFIG_ARC_HAS_LLSC
 	arch_spinlock_t		lock_mutex;
+#endif
 } arch_rwlock_t;
 
 #define __ARCH_RW_LOCK_UNLOCKED__	0x01000000

View File

@@ -32,20 +32,20 @@
  */
 struct user_regs_struct {
 
-	long pad;
+	unsigned long pad;
 	struct {
-		long bta, lp_start, lp_end, lp_count;
-		long status32, ret, blink, fp, gp;
-		long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
-		long sp;
+		unsigned long bta, lp_start, lp_end, lp_count;
+		unsigned long status32, ret, blink, fp, gp;
+		unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+		unsigned long sp;
 	} scratch;
-	long pad2;
+	unsigned long pad2;
 	struct {
-		long r25, r24, r23, r22, r21, r20;
-		long r19, r18, r17, r16, r15, r14, r13;
+		unsigned long r25, r24, r23, r22, r21, r20;
+		unsigned long r19, r18, r17, r16, r15, r14, r13;
 	} callee;
-	long efa;	/* break pt addr, for break points in delay slots */
-	long stop_pc;	/* give dbg stop_pc after ensuring brkpt trap */
+	unsigned long efa;	/* break pt addr, for break points in delay slots */
+	unsigned long stop_pc;	/* give dbg stop_pc after ensuring brkpt trap */
 };
 #endif /* !__ASSEMBLY__ */

View File

@@ -47,6 +47,7 @@ static void read_arc_build_cfg_regs(void)
 	struct bcr_perip uncached_space;
 	struct bcr_generic bcr;
 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
+	unsigned long perip_space;
 	FIX_PTR(cpu);
 
 	READ_BCR(AUX_IDENTITY, cpu->core);
@@ -56,7 +57,12 @@ static void read_arc_build_cfg_regs(void)
 	cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
 
 	READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
-	BUG_ON((uncached_space.start << 24) != ARC_UNCACHED_ADDR_SPACE);
+	if (uncached_space.ver < 3)
+		perip_space = uncached_space.start << 24;
+	else
+		perip_space = read_aux_reg(AUX_NON_VOL) & 0xF0000000;
+
+	BUG_ON(perip_space != ARC_UNCACHED_ADDR_SPACE);
 
 	READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy);
 
@@ -330,6 +336,10 @@ static void arc_chk_core_config(void)
 		pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n");
 	else if (!cpu->extn.fpu_dp && fpu_enabled)
 		panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n");
+
+	if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic &&
+	    !IS_ENABLED(CONFIG_ARC_STAR_9000923308))
+		panic("llock/scond livelock workaround missing\n");
 }
 
 /*

View File

@@ -203,34 +203,24 @@ static int arc_clkevent_set_next_event(unsigned long delta,
 	return 0;
 }
 
-static void arc_clkevent_set_mode(enum clock_event_mode mode,
-				  struct clock_event_device *dev)
+static int arc_clkevent_set_periodic(struct clock_event_device *dev)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		/*
-		 * At X Hz, 1 sec = 1000ms -> X cycles;
-		 *		      10ms -> X / 100 cycles
-		 */
-		arc_timer_event_setup(arc_get_core_freq() / HZ);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		break;
-	default:
-		break;
-	}
-
-	return;
+	/*
+	 * At X Hz, 1 sec = 1000ms -> X cycles;
+	 *		      10ms -> X / 100 cycles
+	 */
+	arc_timer_event_setup(arc_get_core_freq() / HZ);
+	return 0;
 }
 
 static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
-	.name		= "ARC Timer0",
-	.features	= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
-	.mode		= CLOCK_EVT_MODE_UNUSED,
-	.rating		= 300,
-	.irq		= TIMER0_IRQ,	/* hardwired, no need for resources */
-	.set_next_event = arc_clkevent_set_next_event,
-	.set_mode	= arc_clkevent_set_mode,
+	.name			= "ARC Timer0",
+	.features		= CLOCK_EVT_FEAT_ONESHOT |
+				  CLOCK_EVT_FEAT_PERIODIC,
+	.rating			= 300,
+	.irq			= TIMER0_IRQ,	/* hardwired, no need for resources */
+	.set_next_event		= arc_clkevent_set_next_event,
+	.set_state_periodic	= arc_clkevent_set_periodic,
 };
 
 static irqreturn_t timer_irq_handler(int irq, void *dev_id)
@@ -240,7 +230,7 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id)
 	 * irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
 	 */
 	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
-	int irq_reenable = evt->mode == CLOCK_EVT_MODE_PERIODIC;
+	int irq_reenable = clockevent_state_periodic(evt);
 
 	/*
 	 * Any write to CTRL reg ACks the interrupt, we rewrite the
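
To make the "X / 100" comment concrete: with a hypothetical 80 MHz core clock and HZ = 100, arc_get_core_freq() / HZ = 80000000 / 100 = 800000, so arc_timer_event_setup() programs one timer event every 800000 cycles, i.e. one interrupt per 10 ms tick.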

View File

@@ -206,7 +206,7 @@ unalignedOffby3:
 	ld.ab	r6, [r1, 4]
 	prefetch [r1, 28]	;Prefetch the next read location
 	ld.ab	r8, [r1,4]
-	prefetch [r3, 32]	;Prefetch the next write location
+	prefetchw [r3, 32]	;Prefetch the next write location
 
 	SHIFT_1	(r7, r6, 8)
 	or	r7, r7, r5

View File

@@ -10,12 +10,6 @@
 
 #undef PREALLOC_NOT_AVAIL
 
-#ifdef PREALLOC_NOT_AVAIL
-#define PREWRITE(A,B)	prefetchw [(A),(B)]
-#else
-#define PREWRITE(A,B)	prealloc [(A),(B)]
-#endif
-
 ENTRY(memset)
 	prefetchw [r0]		; Prefetch the write location
 	mov.f	0, r2
@@ -51,9 +45,15 @@ ENTRY(memset)
 
 ;;; Convert len to Dwords, unfold x8
 	lsr.f	lp_count, lp_count, 6
+
 	lpnz	@.Lset64bytes
 	;; LOOP START
-	PREWRITE(r3, 64)	;Prefetch the next write location
+#ifdef PREALLOC_NOT_AVAIL
+	prefetchw [r3, 64]	;Prefetch the next write location
+#else
+	prealloc  [r3, 64]
+#endif
+#ifdef CONFIG_ARC_HAS_LL64
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
@@ -62,16 +62,45 @@ ENTRY(memset)
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
+#else
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+#endif
 .Lset64bytes:
 
 	lsr.f	lp_count, r2, 5 ;Last remaining  max 124 bytes
 	lpnz	.Lset32bytes
 	;; LOOP START
 	prefetchw [r3, 32]	;Prefetch the next write location
+#ifdef CONFIG_ARC_HAS_LL64
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
+#else
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+#endif
 .Lset32bytes:
 
 	and.f	lp_count, r2, 0x1F ;Last remaining 31 bytes

View File

@@ -389,6 +389,21 @@ axs103_set_freq(unsigned int id, unsigned int fd, unsigned int od)
 
 static void __init axs103_early_init(void)
 {
+	/*
+	 * AXS103 configurations for SMP/QUAD configurations share device tree
+	 * which defaults to 90 MHz. However recent failures of Quad config
+	 * revealed P&R timing violations so clamp it down to safe 50 MHz
+	 * Instead of duplicating defconfig/DT for SMP/QUAD, add a small hack
+	 *
+	 * This hack is really hacky as of now. Fix it properly by getting the
+	 * number of cores as return value of platform's early SMP callback
+	 */
+#ifdef CONFIG_ARC_MCIP
+	unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F;
+	if (num_cores > 2)
+		arc_set_core_freq(50 * 1000000);
+#endif
+
 	switch (arc_get_core_freq()/1000000) {
 	case 33:
 		axs103_set_freq(1, 1, 1);

View File

@@ -1140,6 +1140,7 @@
 				ctrl-module = <&omap_control_sata>;
 				clocks = <&sys_clkin1>, <&sata_ref_clk>;
 				clock-names = "sysclk", "refclk";
+				syscon-pllreset = <&scm_conf 0x3fc>;
 				#phy-cells = <0>;
 			};

View File

@@ -138,8 +138,8 @@
 
 		mipi_phy: video-phy@10020710 {
 			compatible = "samsung,s5pv210-mipi-video-phy";
-			reg = <0x10020710 8>;
 			#phy-cells = <1>;
+			syscon = <&pmu_system_controller>;
 		};
 
 		pd_cam: cam-power-domain@10023C00 {

View File

@ -127,6 +127,10 @@
}; };
}; };
&cpu0 {
cpu0-supply = <&buck1_reg>;
};
&fimd { &fimd {
pinctrl-0 = <&lcd_en &lcd_clk &lcd_data24 &pwm0_out>; pinctrl-0 = <&lcd_en &lcd_clk &lcd_data24 &pwm0_out>;
pinctrl-names = "default"; pinctrl-names = "default";

View File

@ -188,6 +188,10 @@
}; };
}; };
&cpu0 {
cpu0-supply = <&varm_breg>;
};
&dsi_0 { &dsi_0 {
vddcore-supply = <&vusb_reg>; vddcore-supply = <&vusb_reg>;
vddio-supply = <&vmipi_reg>; vddio-supply = <&vmipi_reg>;

View File

@ -548,6 +548,10 @@
}; };
}; };
&cpu0 {
cpu0-supply = <&vdd_arm_reg>;
};
&pinctrl_1 { &pinctrl_1 {
hdmi_hpd: hdmi-hpd { hdmi_hpd: hdmi-hpd {
samsung,pins = "gpx3-7"; samsung,pins = "gpx3-7";

View File

@ -40,6 +40,18 @@
device_type = "cpu"; device_type = "cpu";
compatible = "arm,cortex-a9"; compatible = "arm,cortex-a9";
reg = <0x900>; reg = <0x900>;
clocks = <&clock CLK_ARM_CLK>;
clock-names = "cpu";
clock-latency = <160000>;
operating-points = <
1200000 1250000
1000000 1150000
800000 1075000
500000 975000
400000 975000
200000 950000
>;
cooling-min-level = <4>; cooling-min-level = <4>;
cooling-max-level = <2>; cooling-max-level = <2>;
#cooling-cells = <2>; /* min followed by max */ #cooling-cells = <2>; /* min followed by max */

View File

@@ -286,8 +286,8 @@
 			can1: can@53fe4000 {
 				compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
 				reg = <0x53fe4000 0x1000>;
-				clocks = <&clks 33>;
-				clock-names = "ipg";
+				clocks = <&clks 33>, <&clks 33>;
+				clock-names = "ipg", "per";
 				interrupts = <43>;
 				status = "disabled";
 			};
@@ -295,8 +295,8 @@
 			can2: can@53fe8000 {
 				compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
 				reg = <0x53fe8000 0x1000>;
-				clocks = <&clks 34>;
-				clock-names = "ipg";
+				clocks = <&clks 34>, <&clks 34>;
+				clock-names = "ipg", "per";
 				interrupts = <44>;
 				status = "disabled";
 			};

View File

@@ -13,9 +13,8 @@ clocks {
 		#clock-cells = <0>;
 		compatible = "ti,keystone,main-pll-clock";
 		clocks = <&refclksys>;
-		reg = <0x02620350 4>, <0x02310110 4>;
-		reg-names = "control", "multiplier";
-		fixed-postdiv = <2>;
+		reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+		reg-names = "control", "multiplier", "post-divider";
 	};
 
 	papllclk: papllclk@2620358 {

View File

@@ -22,9 +22,8 @@ clocks {
 		#clock-cells = <0>;
 		compatible = "ti,keystone,main-pll-clock";
 		clocks = <&refclksys>;
-		reg = <0x02620350 4>, <0x02310110 4>;
-		reg-names = "control", "multiplier";
-		fixed-postdiv = <2>;
+		reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+		reg-names = "control", "multiplier", "post-divider";
 	};
 
 	papllclk: papllclk@2620358 {

View File

@@ -22,9 +22,8 @@ clocks {
 		#clock-cells = <0>;
 		compatible = "ti,keystone,main-pll-clock";
 		clocks = <&refclksys>;
-		reg = <0x02620350 4>, <0x02310110 4>;
-		reg-names = "control", "multiplier";
-		fixed-postdiv = <2>;
+		reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+		reg-names = "control", "multiplier", "post-divider";
 	};
 
 	papllclk: papllclk@2620358 {

View File

@@ -17,6 +17,7 @@
 	};
 
 	aliases {
+		serial1 = &uart1;
 		stmpe-i2c0 = &stmpe0;
 		stmpe-i2c1 = &stmpe1;
 	};

View File

@@ -15,6 +15,10 @@
 		bootargs = "root=/dev/ram0 console=ttyAMA1,115200n8 earlyprintk";
 	};
 
+	aliases {
+		serial1 = &uart1;
+	};
+
 	src@101e0000 {
 		/* These chrystal drivers are not used on this board */
 		disable-sxtalo;

View File

@@ -757,6 +757,7 @@
 			clock-names = "uartclk", "apb_pclk";
 			pinctrl-names = "default";
 			pinctrl-0 = <&uart0_default_mux>;
+			status = "disabled";
 		};
 
 		uart1: uart@101fb000 {

View File

@@ -2373,6 +2373,9 @@ static int of_dev_hwmod_lookup(struct device_node *np,
  * registers.  This address is needed early so the OCP registers that
  * are part of the device's address space can be ioremapped properly.
  *
+ * If SYSC access is not needed, the registers will not be remapped
+ * and non-availability of MPU access is not treated as an error.
+ *
  * Returns 0 on success, -EINVAL if an invalid hwmod is passed, and
  * -ENXIO on absent or invalid register target address space.
  */
@@ -2387,6 +2390,11 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
 
 	_save_mpu_port_index(oh);
 
+	/* if we don't need sysc access we don't need to ioremap */
+	if (!oh->class->sysc)
+		return 0;
+
+	/* we can't continue without MPU PORT if we need sysc access */
 	if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
 		return -ENXIO;
 
@@ -2396,8 +2404,10 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
 			 oh->name);
 
 		/* Extract the IO space from device tree blob */
-		if (!np)
+		if (!np) {
+			pr_err("omap_hwmod: %s: no dt node\n", oh->name);
 			return -ENXIO;
+		}
 
 		va_start = of_iomap(np, index + oh->mpu_rt_idx);
 	} else {
@@ -2456,13 +2466,11 @@ static int __init _init(struct omap_hwmod *oh, void *data)
 				oh->name, np->name);
 	}
 
-	if (oh->class->sysc) {
-		r = _init_mpu_rt_base(oh, NULL, index, np);
-		if (r < 0) {
-			WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
-			     oh->name);
-			return 0;
-		}
+	r = _init_mpu_rt_base(oh, NULL, index, np);
+	if (r < 0) {
+		WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
+		     oh->name);
+		return 0;
 	}
 
 	r = _init_clocks(oh, NULL);

View File

@@ -827,8 +827,7 @@ static struct omap_hwmod_class_sysconfig dra7xx_gpmc_sysc = {
 	.syss_offs	= 0x0014,
 	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE |
 			   SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-			   SIDLE_SMART_WKUP),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
 	.sysc_fields	= &omap_hwmod_sysc_type1,
 };
 
@@ -844,7 +843,7 @@ static struct omap_hwmod dra7xx_gpmc_hwmod = {
 	.class		= &dra7xx_gpmc_hwmod_class,
 	.clkdm_name	= "l3main1_clkdm",
 	/* Skip reset for CONFIG_OMAP_GPMC_DEBUG for bootloader timings */
-	.flags		= HWMOD_SWSUP_SIDLE | DEBUG_OMAP_GPMC_HWMOD_FLAGS,
+	.flags		= DEBUG_OMAP_GPMC_HWMOD_FLAGS,
 	.main_clk	= "l3_iclk_div",
 	.prcm = {
 		.omap4 = {

View File

@@ -823,7 +823,7 @@
 			device_type = "dma";
 			reg = <0x0 0x1f270000 0x0 0x10000>,
 			      <0x0 0x1f200000 0x0 0x10000>,
-			      <0x0 0x1b008000 0x0 0x2000>,
+			      <0x0 0x1b000000 0x0 0x400000>,
 			      <0x0 0x1054a000 0x0 0x100>;
 			interrupts = <0x0 0x82 0x4>,
 				     <0x0 0xb8 0x4>,

View File

@@ -122,12 +122,12 @@ static int __init uefi_init(void)
 
 	/* Show what we know for posterity */
 	c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
-			     sizeof(vendor));
+			     sizeof(vendor) * sizeof(efi_char16_t));
 	if (c16) {
 		for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
 			vendor[i] = c16[i];
 		vendor[i] = '\0';
-		early_memunmap(c16, sizeof(vendor));
+		early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
 	}
 
 	pr_info("EFI v%u.%.02u by %s\n",

View File

@@ -168,7 +168,8 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
 		 * Other callers might not initialize the si_lsb field,
 		 * so check explicitely for the right codes here.
 		 */
-		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
+		if (from->si_signo == SIGBUS &&
+		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
 			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
 #endif
 		break;
@@ -201,8 +202,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
 
 int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
 {
-	memset(to, 0, sizeof *to);
-
 	if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
 	    copy_from_user(to->_sifields._pad,
 			   from->_sifields._pad, SI_PAD_SIZE))

View File

@@ -80,6 +80,9 @@ int clk_enable(struct clk *clk)
 {
 	unsigned long flags;
 
+	if (!clk)
+		return 0;
+
 	spin_lock_irqsave(&clk_lock, flags);
 	__clk_enable(clk);
 	spin_unlock_irqrestore(&clk_lock, flags);
@@ -106,6 +109,9 @@ void clk_disable(struct clk *clk)
 {
 	unsigned long flags;
 
+	if (IS_ERR_OR_NULL(clk))
+		return;
+
 	spin_lock_irqsave(&clk_lock, flags);
 	__clk_disable(clk);
 	spin_unlock_irqrestore(&clk_lock, flags);
@@ -117,6 +123,9 @@ unsigned long clk_get_rate(struct clk *clk)
 	unsigned long flags;
 	unsigned long rate;
 
+	if (!clk)
+		return 0;
+
 	spin_lock_irqsave(&clk_lock, flags);
 	rate = clk->get_rate(clk);
 	spin_unlock_irqrestore(&clk_lock, flags);
@@ -129,6 +138,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
 {
 	unsigned long flags, actual_rate;
 
+	if (!clk)
+		return 0;
+
 	if (!clk->set_rate)
 		return -ENOSYS;
 
@@ -145,6 +157,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
 	unsigned long flags;
 	long ret;
 
+	if (!clk)
+		return 0;
+
 	if (!clk->set_rate)
 		return -ENOSYS;
 
@@ -161,6 +176,9 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
 	unsigned long flags;
 	int ret;
 
+	if (!clk)
+		return 0;
+
 	if (!clk->set_parent)
 		return -ENOSYS;
 
@@ -174,7 +192,7 @@ EXPORT_SYMBOL(clk_set_parent);
 
 struct clk *clk_get_parent(struct clk *clk)
 {
-	return clk->parent;
+	return !clk ? NULL : clk->parent;
 }
 EXPORT_SYMBOL(clk_get_parent);
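
These NULL checks adopt the common Linux clk-API convention that a NULL struct clk pointer is a valid "dummy" clock whose operations are no-ops. A minimal consumer-side sketch of why that matters (the device and clock names here are hypothetical):

	#include <linux/clk.h>
	#include <linux/err.h>

	/* A driver that treats its bus clock as optional. With the checks
	 * above, passing NULL to clk_enable()/clk_disable() is harmless,
	 * so the driver needs no NULL guard at every call site. */
	static int foo_enable_bus_clock(struct device *dev)
	{
		struct clk *clk = clk_get(dev, "bus");

		if (IS_ERR(clk))
			clk = NULL;	/* clock not wired up on this board */

		return clk_enable(clk);	/* returns 0 immediately when clk == NULL */
	}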

View File

@@ -151,7 +151,6 @@ config BMIPS_GENERIC
 	select BCM7120_L2_IRQ
 	select BRCMSTB_L2_IRQ
 	select IRQ_MIPS_CPU
-	select RAW_IRQ_ACCESSORS
 	select DMA_NONCOHERENT
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_LITTLE_ENDIAN


@@ -190,6 +190,7 @@ int get_c0_perfcount_int(void)
 {
 	return ATH79_MISC_IRQ(5);
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);

 unsigned int get_c0_compare_int(void)
 {


@@ -42,7 +42,7 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
 	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);

 	if (action & SMP_CALL_FUNCTION)
-		smp_call_function_interrupt();
+		generic_smp_call_function_interrupt();
 	if (action & SMP_RESCHEDULE_YOURSELF)
 		scheduler_ipi();


@@ -1,10 +0,0 @@
-#ifndef __ASM_MACH_BCM63XX_DMA_COHERENCE_H
-#define __ASM_MACH_BCM63XX_DMA_COHERENCE_H
-
-#include <asm/bmips.h>
-
-#define plat_post_dma_flush	bmips_post_dma_flush
-
-#include <asm/mach-generic/dma-coherence.h>
-
-#endif /* __ASM_MACH_BCM63XX_DMA_COHERENCE_H */


@@ -182,8 +182,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 		 * Make sure the buddy is global too (if it's !none,
 		 * it better already be global)
 		 */
+#ifdef CONFIG_SMP
+		/*
+		 * For SMP, multiple CPUs can race, so we need to do
+		 * this atomically.
+		 */
+#ifdef CONFIG_64BIT
+#define LL_INSN "lld"
+#define SC_INSN "scd"
+#else /* CONFIG_32BIT */
+#define LL_INSN "ll"
+#define SC_INSN "sc"
+#endif
+		unsigned long page_global = _PAGE_GLOBAL;
+		unsigned long tmp;
+
+		__asm__ __volatile__ (
+			"	.set	push\n"
+			"	.set	noreorder\n"
+			"1:	" LL_INSN "	%[tmp], %[buddy]\n"
+			"	bnez	%[tmp], 2f\n"
+			"	 or	%[tmp], %[tmp], %[global]\n"
+			"	" SC_INSN "	%[tmp], %[buddy]\n"
+			"	beqz	%[tmp], 1b\n"
+			"	 nop\n"
+			"2:\n"
+			"	.set	pop"
+			: [buddy] "+m" (buddy->pte),
+			  [tmp] "=&r" (tmp)
+			: [global] "r" (page_global));
+#else /* !CONFIG_SMP */
 		if (pte_none(*buddy))
 			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
+#endif /* CONFIG_SMP */
 	}
 #endif
 }

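In plain C, the ll/sc loop added above amounts to an atomic read-test-or on the buddy PTE. A rough equivalent using cmpxchg() (illustrative only, with a hypothetical helper name; the actual patch open-codes ll/sc so the 32/64-bit variant matches the PTE width):

static inline void buddy_set_global(unsigned long *buddy)
{
	unsigned long old;

	do {
		old = READ_ONCE(*buddy);
		if (old != 0)
			return;	/* no longer none; must already be global */
	} while (cmpxchg(buddy, old, old | _PAGE_GLOBAL) != old);
}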

@@ -83,8 +83,6 @@ static inline void __cpu_die(unsigned int cpu)
 extern void play_dead(void);
 #endif

-extern asmlinkage void smp_call_function_interrupt(void);
-
 static inline void arch_send_call_function_single_ipi(int cpu)
 {
 	extern struct plat_smp_ops *mp_ops;	/* private */


@@ -152,6 +152,31 @@
 		.set	noreorder
 		bltz	k0, 8f
 		 move	k1, sp
+#ifdef CONFIG_EVA
+		/*
+		 * Flush interAptiv's Return Prediction Stack (RPS) by writing
+		 * EntryHi. Toggling Config7.RPS is slower and less portable.
+		 *
+		 * The RPS isn't automatically flushed when exceptions are
+		 * taken, which can result in kernel mode speculative accesses
+		 * to user addresses if the RPS mispredicts. That's harmless
+		 * when user and kernel share the same address space, but with
+		 * EVA the same user segments may be unmapped to kernel mode,
+		 * even containing sensitive MMIO regions or invalid memory.
+		 *
+		 * This can happen when the kernel sets the return address to
+		 * ret_from_* and jr's to the exception handler, which looks
+		 * more like a tail call than a function call. If nested calls
+		 * don't evict the last user address in the RPS, it will
+		 * mispredict the return and fetch from a user controlled
+		 * address into the icache.
+		 *
+		 * More recent EVA-capable cores with MAAR to restrict
+		 * speculative accesses aren't affected.
+		 */
+		MFC0	k0, CP0_ENTRYHI
+		MTC0	k0, CP0_ENTRYHI
+#endif
 		.set	reorder
 		/* Called from user mode, new stack. */
 		get_saved_sp


@@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
 				      unsigned long __user *user_mask_ptr)
 {
 	unsigned int real_len;
-	cpumask_t mask;
+	cpumask_t allowed, mask;
 	int retval;
 	struct task_struct *p;

@@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
 	if (retval)
 		goto out_unlock;

-	cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
+	cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
+	cpumask_and(&mask, &allowed, cpu_active_mask);

 out_unlock:
 	read_unlock(&tasklist_lock);


@@ -38,7 +38,7 @@ char *mips_get_machine_name(void)
 	return mips_machine_name;
 }

-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
 void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 {
 	return add_memory_region(base, size, BOOT_MEM_RAM);


@@ -24,7 +24,7 @@ LEAF(relocate_new_kernel)
 process_entry:
 	PTR_L		s2, (s0)
-	PTR_ADD		s0, s0, SZREG
+	PTR_ADDIU	s0, s0, SZREG

 	/*
 	 * In case of a kdump/crash kernel, the indirection page is not
@@ -61,9 +61,9 @@ copy_word:
 	/* copy page word by word */
 	REG_L		s5, (s2)
 	REG_S		s5, (s4)
-	PTR_ADD		s4, s4, SZREG
-	PTR_ADD		s2, s2, SZREG
-	LONG_SUB	s6, s6, 1
+	PTR_ADDIU	s4, s4, SZREG
+	PTR_ADDIU	s2, s2, SZREG
+	LONG_ADDIU	s6, s6, -1
 	beq		s6, zero, process_entry
 	b		copy_word
 	b		process_entry


@@ -409,8 +409,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
 int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
 {
-	memset(to, 0, sizeof *to);
-
 	if (copy_from_user(to, from, 3*sizeof(int)) ||
 	    copy_from_user(to->_sifields._pad,
 			   from->_sifields._pad, SI_PAD_SIZE32))


@@ -284,7 +284,7 @@ static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id)
 	if (action == 0)
 		scheduler_ipi();
 	else
-		smp_call_function_interrupt();
+		generic_smp_call_function_interrupt();

 	return IRQ_HANDLED;
 }
@@ -336,7 +336,7 @@ static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id)
 	if (action & SMP_RESCHEDULE_YOURSELF)
 		scheduler_ipi();
 	if (action & SMP_CALL_FUNCTION)
-		smp_call_function_interrupt();
+		generic_smp_call_function_interrupt();

 	return IRQ_HANDLED;
 }


@@ -192,16 +192,6 @@ asmlinkage void start_secondary(void)
 	cpu_startup_entry(CPUHP_ONLINE);
 }

-/*
- * Call into both interrupt handlers, as we share the IPI for them
- */
-void __irq_entry smp_call_function_interrupt(void)
-{
-	irq_enter();
-	generic_smp_call_function_interrupt();
-	irq_exit();
-}
-
 static void stop_this_cpu(void *dummy)
 {
 	/*

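The hunk above deletes MIPS' shared smp_call_function_interrupt() wrapper; the surrounding hunks then split its callers into two shapes, sketched here with hypothetical function names:

static irqreturn_t example_ipi_call_interrupt(int irq, void *dev_id)
{
	/*
	 * Registered irqaction handler: the IRQ core has already done
	 * irq_enter(), so the generic helper is called directly.
	 */
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static void example_raw_ipi_dispatch(void)
{
	/*
	 * Low-level vectored path that bypasses the IRQ core: the
	 * irq_enter()/irq_exit() accounting must now be done by hand.
	 */
	irq_enter();
	generic_smp_call_function_interrupt();
	irq_exit();
}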

@@ -192,6 +192,7 @@ static void show_stacktrace(struct task_struct *task,
 void show_stack(struct task_struct *task, unsigned long *sp)
 {
 	struct pt_regs regs;
+	mm_segment_t old_fs = get_fs();
 	if (sp) {
 		regs.regs[29] = (unsigned long)sp;
 		regs.regs[31] = 0;
@@ -210,7 +211,13 @@ void show_stack(struct task_struct *task, unsigned long *sp)
 			prepare_frametrace(&regs);
 		}
 	}
+	/*
+	 * show_stack() deals exclusively with kernel mode, so be sure to access
+	 * the stack in the kernel (not user) address space.
+	 */
+	set_fs(KERNEL_DS);
 	show_stacktrace(task, &regs);
+	set_fs(old_fs);
 }

 static void show_code(unsigned int __user *pc)
@@ -1519,6 +1526,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
 	const int field = 2 * sizeof(unsigned long);
 	int multi_match = regs->cp0_status & ST0_TS;
 	enum ctx_state prev_state;
+	mm_segment_t old_fs = get_fs();

 	prev_state = exception_enter();
 	show_regs(regs);
@@ -1540,8 +1548,13 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
 		dump_tlb_all();
 	}

+	if (!user_mode(regs))
+		set_fs(KERNEL_DS);
+
 	show_code((unsigned int __user *) regs->cp0_epc);

+	set_fs(old_fs);
+
 	/*
 	 * Some chips may have other causes of machine check (e.g. SB1
 	 * graduation timer)

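The get_fs()/set_fs() dance added above, shown in isolation (a minimal sketch, not taken from the patch): widen the address limit so the user-accessors may read kernel addresses, and always restore the saved limit afterwards.

static void dump_kernel_words(const unsigned long *sp, int n)
{
	mm_segment_t old_fs = get_fs();
	unsigned long val;
	int i;

	set_fs(KERNEL_DS);	/* let __get_user() accept kernel addresses */
	for (i = 0; i < n; i++) {
		if (__get_user(val, sp + i))
			break;
		pr_cont(" %0*lx", 2 * (int)sizeof(val), val);
	}
	set_fs(old_fs);		/* always restore the saved limit */
	pr_cont("\n");
}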

@@ -438,7 +438,7 @@ do {                                                \
 		: "memory");                                \
 } while(0)

-#define StoreDW(addr, value, res) \
+#define _StoreDW(addr, value, res) \
 do {                                                        \
 	__asm__ __volatile__ (                              \
 		".set\tpush\n\t"                            \


@@ -293,7 +293,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)

 static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
 {
-	smp_call_function_interrupt();
+	generic_smp_call_function_interrupt();
 	return IRQ_HANDLED;
 }

@@ -466,6 +466,7 @@ int get_c0_perfcount_int(void)
 {
 	return ltq_perfcount_irq;
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);

 unsigned int get_c0_compare_int(void)
 {


@@ -266,8 +266,11 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
 	if (action & SMP_RESCHEDULE_YOURSELF)
 		scheduler_ipi();

-	if (action & SMP_CALL_FUNCTION)
-		smp_call_function_interrupt();
+	if (action & SMP_CALL_FUNCTION) {
+		irq_enter();
+		generic_smp_call_function_interrupt();
+		irq_exit();
+	}

 	if (action & SMP_ASK_C0COUNT) {
 		BUG_ON(cpu != 0);


@@ -160,18 +160,18 @@ static inline void setup_protection_map(void)
 		protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
 		protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
 		protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
-		protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+		protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
 		protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
-		protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+		protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
 		protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
 		protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
 		protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
 		protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
 		protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
-		protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+		protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
 		protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
-		protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ);
+		protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
 		protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
 	} else {

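For reference, protection_map[] is indexed by the low four vm_flags bits (VM_READ = 0x1, VM_WRITE = 0x2, VM_EXEC = 0x4, VM_SHARED = 0x8), so entries 4, 6, 12 and 14 above are exactly the PROT_EXEC-without-PROT_READ combinations that stop being marked _PAGE_NO_READ. A sketch of the lookup (the helper name is hypothetical; core mm does the equivalent in vm_get_page_prot()):

static inline pgprot_t example_vm_prot(unsigned long vm_flags)
{
	return protection_map[vm_flags &
			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}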

@@ -133,7 +133,8 @@ good_area:
 #endif
 				goto bad_area;
 			}
-			if (!(vma->vm_flags & VM_READ)) {
+			if (!(vma->vm_flags & VM_READ) &&
+			    exception_epc(regs) != address) {
 #if 0
 				pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
 					  raw_smp_processor_id(),


@@ -222,7 +222,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)

 static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
 {
-	smp_call_function_interrupt();
+	generic_smp_call_function_interrupt();
 	return IRQ_HANDLED;
 }


@@ -154,6 +154,7 @@ int get_c0_perfcount_int(void)
 	return mips_cpu_perf_irq;
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);

 unsigned int get_c0_compare_int(void)
 {
@@ -171,14 +172,17 @@ unsigned int get_c0_compare_int(void)

 static void __init init_rtc(void)
 {
-	/* stop the clock whilst setting it up */
-	CMOS_WRITE(RTC_SET | RTC_24H, RTC_CONTROL);
+	unsigned char freq, ctrl;

-	/* 32KHz time base */
-	CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
+	/* Set 32KHz time base if not already set */
+	freq = CMOS_READ(RTC_FREQ_SELECT);
+	if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ)
+		CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);

-	/* start the clock */
-	CMOS_WRITE(RTC_24H, RTC_CONTROL);
+	/* Ensure SET bit is clear so RTC can run */
+	ctrl = CMOS_READ(RTC_CONTROL);
+	if (ctrl & RTC_SET)
+		CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL);
 }

 void __init plat_time_init(void)


@@ -77,6 +77,7 @@ int get_c0_perfcount_int(void)
 		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
 	return -1;
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);

 unsigned int get_c0_compare_int(void)
 {


@@ -86,7 +86,7 @@ void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc)
 {
 	clear_c0_eimr(irq);
 	ack_c0_eirr(irq);
-	smp_call_function_interrupt();
+	generic_smp_call_function_interrupt();
 	set_c0_eimr(irq);
 }


@@ -114,7 +114,7 @@ static irqreturn_t paravirt_reched_interrupt(int irq, void *dev_id)

 static irqreturn_t paravirt_function_interrupt(int irq, void *dev_id)
 {
-	smp_call_function_interrupt();
+	generic_smp_call_function_interrupt();
 	return IRQ_HANDLED;
 }


@@ -26,6 +26,7 @@ int get_c0_perfcount_int(void)
 {
 	return gic_get_c0_perfcount_int();
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);

 int get_c0_fdc_int(void)
 {


@@ -44,7 +44,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)

 static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
 {
-	smp_call_function_interrupt();
+	generic_smp_call_function_interrupt();
 	return IRQ_HANDLED;
 }


@@ -89,6 +89,7 @@ int get_c0_perfcount_int(void)
 {
 	return rt_perfcount_irq;
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);

 unsigned int get_c0_compare_int(void)
 {


@@ -107,10 +107,14 @@ static void ip27_do_irq_mask0(void)
 		scheduler_ipi();
 	} else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
 		LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
-		smp_call_function_interrupt();
+		irq_enter();
+		generic_smp_call_function_interrupt();
+		irq_exit();
 	} else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
 		LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
-		smp_call_function_interrupt();
+		irq_enter();
+		generic_smp_call_function_interrupt();
+		irq_exit();
 	} else
 #endif
 	{


@@ -29,8 +29,6 @@
 #include <asm/sibyte/bcm1480_regs.h>
 #include <asm/sibyte/bcm1480_int.h>

-extern void smp_call_function_interrupt(void);
-
 /*
  * These are routines for dealing with the bcm1480 smp capabilities
  * independent of board/firmware
@@ -184,6 +182,9 @@ void bcm1480_mailbox_interrupt(void)
 	if (action & SMP_RESCHEDULE_YOURSELF)
 		scheduler_ipi();

-	if (action & SMP_CALL_FUNCTION)
-		smp_call_function_interrupt();
+	if (action & SMP_CALL_FUNCTION) {
+		irq_enter();
+		generic_smp_call_function_interrupt();
+		irq_exit();
+	}
 }


@@ -172,6 +172,9 @@ void sb1250_mailbox_interrupt(void)
 	if (action & SMP_RESCHEDULE_YOURSELF)
 		scheduler_ipi();

-	if (action & SMP_CALL_FUNCTION)
-		smp_call_function_interrupt();
+	if (action & SMP_CALL_FUNCTION) {
+		irq_enter();
+		generic_smp_call_function_interrupt();
+		irq_exit();
+	}
 }


@@ -966,8 +966,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
 int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
 {
-	memset(to, 0, sizeof *to);
-
 	if (copy_from_user(to, from, 3*sizeof(int)) ||
 	    copy_from_user(to->_sifields._pad,
 			   from->_sifields._pad, SI_PAD_SIZE32))


@@ -1478,7 +1478,7 @@ static int pnv_eeh_next_error(struct eeh_pe **pe)
 	}

 	/* Unmask the event */
-	if (eeh_enabled())
+	if (ret == EEH_NEXT_ERR_NONE && eeh_enabled())
 		enable_irq(eeh_event_irq);

 	return ret;


@@ -2220,7 +2220,7 @@ static void pnv_pci_ioda_setup_opal_tce_kill(struct pnv_phb *phb)
 static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
 		unsigned levels, unsigned long limit,
-		unsigned long *current_offset)
+		unsigned long *current_offset, unsigned long *total_allocated)
 {
 	struct page *tce_mem = NULL;
 	__be64 *addr, *tmp;
@@ -2236,6 +2236,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
 	}
 	addr = page_address(tce_mem);
 	memset(addr, 0, allocated);
+	*total_allocated += allocated;

 	--levels;
 	if (!levels) {
@@ -2245,7 +2246,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
 	for (i = 0; i < entries; ++i) {
 		tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift,
-				levels, limit, current_offset);
+				levels, limit, current_offset, total_allocated);
 		if (!tmp)
 			break;
@@ -2267,7 +2268,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
 		struct iommu_table *tbl)
 {
 	void *addr;
-	unsigned long offset = 0, level_shift;
+	unsigned long offset = 0, level_shift, total_allocated = 0;
 	const unsigned window_shift = ilog2(window_size);
 	unsigned entries_shift = window_shift - page_shift;
 	unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT);
@@ -2286,7 +2287,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
 	/* Allocate TCE table */
 	addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
-			levels, tce_table_size, &offset);
+			levels, tce_table_size, &offset, &total_allocated);

 	/* addr==NULL means that the first level allocation failed */
 	if (!addr)
@@ -2308,7 +2309,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
 			page_shift);
 	tbl->it_level_size = 1ULL << (level_shift - 3);
 	tbl->it_indirect_levels = levels - 1;
-	tbl->it_allocated_size = offset;
+	tbl->it_allocated_size = total_allocated;

 	pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n",
 			window_size, tce_table_size, bus_offset);


@@ -138,6 +138,8 @@ int init_cache_level(unsigned int cpu)
 	union cache_topology ct;
 	enum cache_type ctype;

+	if (!test_facility(34))
+		return -EOPNOTSUPP;
 	if (!this_cpu_ci)
 		return -EINVAL;
 	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);


@@ -1742,10 +1742,10 @@ static bool ibs_enabled(struct kvm_vcpu *vcpu)

 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->requests)
-		return 0;
 retry:
 	kvm_s390_vcpu_request_handled(vcpu);
+	if (!vcpu->requests)
+		return 0;

 	/*
 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
 	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.


@@ -448,13 +448,13 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
 		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
 			      BPF_REG_1, offsetof(struct sk_buff, data));
 	}
-	/* BPF compatibility: clear A (%b7) and X (%b8) registers */
-	if (REG_SEEN(BPF_REG_7))
-		/* lghi %b7,0 */
-		EMIT4_IMM(0xa7090000, BPF_REG_7, 0);
-	if (REG_SEEN(BPF_REG_8))
-		/* lghi %b8,0 */
-		EMIT4_IMM(0xa7090000, BPF_REG_8, 0);
+	/* BPF compatibility: clear A (%b0) and X (%b7) registers */
+	if (REG_SEEN(BPF_REG_A))
+		/* lghi %ba,0 */
+		EMIT4_IMM(0xa7090000, BPF_REG_A, 0);
+	if (REG_SEEN(BPF_REG_X))
+		/* lghi %bx,0 */
+		EMIT4_IMM(0xa7090000, BPF_REG_X, 0);
 }

 /*


@@ -28,16 +28,10 @@
  * Must preserve %o5 between VISEntryHalf and VISExitHalf */

 #define VISEntryHalf \
-	rd		%fprs, %o5; \
-	andcc		%o5, FPRS_FEF, %g0; \
-	be,pt		%icc, 297f; \
-	 sethi		%hi(298f), %g7; \
-	sethi		%hi(VISenterhalf), %g1; \
-	jmpl		%g1 + %lo(VISenterhalf), %g0; \
-	 or		%g7, %lo(298f), %g7; \
-	clr		%o5; \
-297:	wr		%o5, FPRS_FEF, %fprs; \
-298:
+	VISEntry
+
+#define VISExitHalf \
+	VISExit

 #define VISEntryHalfFast(fail_label) \
 	rd		%fprs, %o5; \
@@ -47,7 +41,7 @@
 	ba,a,pt		%xcc, fail_label; \
 297:	wr		%o5, FPRS_FEF, %fprs;

-#define VISExitHalf \
+#define VISExitHalfFast \
 	wr		%o5, 0, %fprs;

 #ifndef __ASSEMBLY__


@@ -240,8 +240,11 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
 	add		%o0, 0x40, %o0
 	bne,pt		%icc, 1b
 	 LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
+#ifdef NON_USER_COPY
+	VISExitHalfFast
+#else
 	VISExitHalf
+#endif
 	brz,pn		%o2, .Lexit
 	 cmp		%o2, 19
 	ble,pn		%icc, .Lsmall_unaligned


@@ -44,9 +44,8 @@ vis1:	ldub		[%g6 + TI_FPSAVED], %g3
 	 stx		%g3, [%g6 + TI_GSR]
 2:	add		%g6, %g1, %g3
-	cmp		%o5, FPRS_DU
-	be,pn		%icc, 6f
-	 sll		%g1, 3, %g1
+	mov		FPRS_DU | FPRS_DL | FPRS_FEF, %o5
+	sll		%g1, 3, %g1
 	stb		%o5, [%g3 + TI_FPSAVED]
 	rd		%gsr, %g2
 	add		%g6, %g1, %g3
@@ -80,65 +79,3 @@ vis1:	ldub		[%g6 + TI_FPSAVED], %g3
 	.align		32
 80:	jmpl		%g7 + %g0, %g0
 	 nop
-
-6:	ldub		[%g3 + TI_FPSAVED], %o5
-	or		%o5, FPRS_DU, %o5
-	add		%g6, TI_FPREGS+0x80, %g2
-	stb		%o5, [%g3 + TI_FPSAVED]
-	sll		%g1, 5, %g1
-	add		%g6, TI_FPREGS+0xc0, %g3
-	wr		%g0, FPRS_FEF, %fprs
-	membar		#Sync
-	stda		%f32, [%g2 + %g1] ASI_BLK_P
-	stda		%f48, [%g3 + %g1] ASI_BLK_P
-	membar		#Sync
-	ba,pt		%xcc, 80f
-	 nop
-
-	.align		32
-80:	jmpl		%g7 + %g0, %g0
-	 nop
-
-	.align		32
-VISenterhalf:
-	ldub		[%g6 + TI_FPDEPTH], %g1
-	brnz,a,pn	%g1, 1f
-	 cmp		%g1, 1
-	stb		%g0, [%g6 + TI_FPSAVED]
-	stx		%fsr, [%g6 + TI_XFSR]
-	clr		%o5
-	jmpl		%g7 + %g0, %g0
-	 wr		%g0, FPRS_FEF, %fprs
-
-1:	bne,pn		%icc, 2f
-	 srl		%g1, 1, %g1
-	ba,pt		%xcc, vis1
-	 sub		%g7, 8, %g7
-2:	addcc		%g6, %g1, %g3
-	sll		%g1, 3, %g1
-	andn		%o5, FPRS_DU, %g2
-	stb		%g2, [%g3 + TI_FPSAVED]
-	rd		%gsr, %g2
-	add		%g6, %g1, %g3
-	stx		%g2, [%g3 + TI_GSR]
-	add		%g6, %g1, %g2
-	stx		%fsr, [%g2 + TI_XFSR]
-	sll		%g1, 5, %g1
-3:	andcc		%o5, FPRS_DL, %g0
-	be,pn		%icc, 4f
-	 add		%g6, TI_FPREGS, %g2
-
-	add		%g6, TI_FPREGS+0x40, %g3
-	membar		#Sync
-	stda		%f0, [%g2 + %g1] ASI_BLK_P
-	stda		%f16, [%g3 + %g1] ASI_BLK_P
-	membar		#Sync
-	ba,pt		%xcc, 4f
-	 nop
-
-	.align		32
-4:	and		%o5, FPRS_DU, %o5
-	jmpl		%g7 + %g0, %g0
-	 wr		%o5, FPRS_FEF, %fprs


@@ -135,10 +135,6 @@ EXPORT_SYMBOL(copy_user_page);
 void VISenter(void);
 EXPORT_SYMBOL(VISenter);

-/* CRYPTO code needs this */
-void VISenterhalf(void);
-EXPORT_SYMBOL(VISenterhalf);
-
 extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
 extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
 	unsigned long *);


@@ -113,8 +113,6 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
 	if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo)))
 		return -EFAULT;

-	memset(to, 0, sizeof(*to));
-
 	err = __get_user(to->si_signo, &from->si_signo);
 	err |= __get_user(to->si_errno, &from->si_errno);
 	err |= __get_user(to->si_code, &from->si_code);


@@ -1193,6 +1193,10 @@ static efi_status_t setup_e820(struct boot_params *params,
 		unsigned int e820_type = 0;
 		unsigned long m = efi->efi_memmap;

+#ifdef CONFIG_X86_64
+		m |= (u64)efi->efi_memmap_hi << 32;
+#endif
+
 		d = (efi_memory_desc_t *)(m + (i * efi->efi_memdesc_size));
 		switch (d->type) {
 		case EFI_RESERVED_TYPE:


@@ -280,21 +280,6 @@ static inline void clear_LDT(void)
 	set_ldt(NULL, 0);
 }

-/*
- * load one particular LDT into the current CPU
- */
-static inline void load_LDT_nolock(mm_context_t *pc)
-{
-	set_ldt(pc->ldt, pc->size);
-}
-
-static inline void load_LDT(mm_context_t *pc)
-{
-	preempt_disable();
-	load_LDT_nolock(pc);
-	preempt_enable();
-}
-
 static inline unsigned long get_desc_base(const struct desc_struct *desc)
 {
 	return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));


@@ -9,8 +9,7 @@
  * we put the segment information here.
  */
 typedef struct {
-	void *ldt;
-	int size;
+	struct ldt_struct *ldt;

 #ifdef CONFIG_X86_64
 	/* True if mm supports a task running in 32 bit compatibility mode. */


@@ -33,6 +33,50 @@ static inline void load_mm_cr4(struct mm_struct *mm)
 static inline void load_mm_cr4(struct mm_struct *mm) {}
 #endif

+/*
+ * ldt_structs can be allocated, used, and freed, but they are never
+ * modified while live.
+ */
+struct ldt_struct {
+	/*
+	 * Xen requires page-aligned LDTs with special permissions. This is
+	 * needed to prevent us from installing evil descriptors such as
+	 * call gates. On native, we could merge the ldt_struct and LDT
+	 * allocations, but it's not worth trying to optimize.
+	 */
+	struct desc_struct *entries;
+	int size;
+};
+
+static inline void load_mm_ldt(struct mm_struct *mm)
+{
+	struct ldt_struct *ldt;
+
+	/* lockless_dereference synchronizes with smp_store_release */
+	ldt = lockless_dereference(mm->context.ldt);
+
+	/*
+	 * Any change to mm->context.ldt is followed by an IPI to all
+	 * CPUs with the mm active. The LDT will not be freed until
+	 * after the IPI is handled by all such CPUs. This means that,
+	 * if the ldt_struct changes before we return, the values we see
+	 * will be safe, and the new values will be loaded before we run
+	 * any user code.
+	 *
+	 * NB: don't try to convert this to use RCU without extreme care.
+	 * We would still need IRQs off, because we don't want to change
+	 * the local LDT after an IPI loaded a newer value than the one
+	 * that we can see.
+	 */
+	if (unlikely(ldt))
+		set_ldt(ldt->entries, ldt->size);
+	else
+		clear_LDT();
+
+	DEBUG_LOCKS_WARN_ON(preemptible());
+}
+
 /*
  * Used for LDT copy/destruction.
  */
@@ -78,12 +122,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		 * was called and then modify_ldt changed
 		 * prev->context.ldt but suppressed an IPI to this CPU.
 		 * In this case, prev->context.ldt != NULL, because we
-		 * never free an LDT while the mm still exists. That
-		 * means that next->context.ldt != prev->context.ldt,
-		 * because mms never share an LDT.
+		 * never set context.ldt to NULL while the mm still
+		 * exists. That means that next->context.ldt !=
+		 * prev->context.ldt, because mms never share an LDT.
 		 */
 		if (unlikely(prev->context.ldt != next->context.ldt))
-			load_LDT_nolock(&next->context);
+			load_mm_ldt(next);
 	}
 #ifdef CONFIG_SMP
 	  else {
@@ -106,7 +150,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			load_cr3(next->pgd);
 			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
 			load_mm_cr4(next);
-			load_LDT_nolock(&next->context);
+			load_mm_ldt(next);
 		}
 	}
 #endif

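The comment in load_mm_ldt() above leans on a release/consume pairing; reduced to its skeleton it looks like this (a minimal sketch with hypothetical wrapper names; the real writer side, install_ldt(), appears in a later hunk of this merge):

static void publish_ldt(struct mm_struct *mm, struct ldt_struct *new_ldt)
{
	/*
	 * Everything stored into *new_ldt before this point is
	 * guaranteed to be visible before the pointer itself is.
	 */
	smp_store_release(&mm->context.ldt, new_ldt);
}

static void consume_ldt(struct mm_struct *mm)
{
	/*
	 * Pairs with the release store: a non-NULL result is always a
	 * fully initialized object.
	 */
	struct ldt_struct *ldt = lockless_dereference(mm->context.ldt);

	if (ldt)
		set_ldt(ldt->entries, ldt->size);
	else
		clear_LDT();
}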

@@ -943,7 +943,7 @@ static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info)
 	 */
 	if (irq < nr_legacy_irqs() && data->count == 1) {
 		if (info->ioapic_trigger != data->trigger)
-			mp_register_handler(irq, data->trigger);
+			mp_register_handler(irq, info->ioapic_trigger);
 		data->entry.trigger = data->trigger = info->ioapic_trigger;
 		data->entry.polarity = data->polarity = info->ioapic_polarity;
 	}


@@ -1410,7 +1410,7 @@ void cpu_init(void)
 	load_sp0(t, &current->thread);
 	set_tss_desc(cpu, t);
 	load_TR_desc();
-	load_LDT(&init_mm.context);
+	load_mm_ldt(&init_mm);

 	clear_all_debug_regs();
 	dbg_restore_debug_regs();
@@ -1459,7 +1459,7 @@ void cpu_init(void)
 	load_sp0(t, thread);
 	set_tss_desc(cpu, t);
 	load_TR_desc();
-	load_LDT(&init_mm.context);
+	load_mm_ldt(&init_mm);

 	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);


@@ -2179,21 +2179,25 @@ static unsigned long get_segment_base(unsigned int segment)
 	int idx = segment >> 3;

 	if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
+		struct ldt_struct *ldt;
+
 		if (idx > LDT_ENTRIES)
 			return 0;

-		if (idx > current->active_mm->context.size)
+		/* IRQs are off, so this synchronizes with smp_store_release */
+		ldt = lockless_dereference(current->active_mm->context.ldt);
+		if (!ldt || idx > ldt->size)
 			return 0;

-		desc = current->active_mm->context.ldt;
+		desc = &ldt->entries[idx];
 	} else {
 		if (idx > GDT_ENTRIES)
 			return 0;

-		desc = raw_cpu_ptr(gdt_page.gdt);
+		desc = raw_cpu_ptr(gdt_page.gdt) + idx;
 	}

-	return get_desc_base(desc + idx);
+	return get_desc_base(desc);
 }

 #ifdef CONFIG_COMPAT


@@ -12,6 +12,7 @@
 #include <linux/string.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
+#include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/uaccess.h>

@@ -20,82 +21,82 @@
 #include <asm/mmu_context.h>
 #include <asm/syscalls.h>

-#ifdef CONFIG_SMP
+/* context.lock is held for us, so we don't need any locking. */
 static void flush_ldt(void *current_mm)
 {
-	if (current->active_mm == current_mm)
-		load_LDT(&current->active_mm->context);
+	mm_context_t *pc;
+
+	if (current->active_mm != current_mm)
+		return;
+
+	pc = &current->active_mm->context;
+	set_ldt(pc->ldt->entries, pc->ldt->size);
 }
-#endif

-static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
+/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
+static struct ldt_struct *alloc_ldt_struct(int size)
 {
-	void *oldldt, *newldt;
-	int oldsize;
-
-	if (mincount <= pc->size)
-		return 0;
-	oldsize = pc->size;
-	mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) &
-			(~(PAGE_SIZE / LDT_ENTRY_SIZE - 1));
-	if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
-		newldt = vmalloc(mincount * LDT_ENTRY_SIZE);
+	struct ldt_struct *new_ldt;
+	int alloc_size;
+
+	if (size > LDT_ENTRIES)
+		return NULL;
+
+	new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
+	if (!new_ldt)
+		return NULL;
+
+	BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
+	alloc_size = size * LDT_ENTRY_SIZE;
+
+	/*
+	 * Xen is very picky: it requires a page-aligned LDT that has no
+	 * trailing nonzero bytes in any page that contains LDT descriptors.
+	 * Keep it simple: zero the whole allocation and never allocate less
+	 * than PAGE_SIZE.
+	 */
+	if (alloc_size > PAGE_SIZE)
+		new_ldt->entries = vzalloc(alloc_size);
 	else
-		newldt = (void *)__get_free_page(GFP_KERNEL);
-
-	if (!newldt)
-		return -ENOMEM;
+		new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL);

-	if (oldsize)
-		memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE);
-	oldldt = pc->ldt;
-	memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
-	       (mincount - oldsize) * LDT_ENTRY_SIZE);
+	if (!new_ldt->entries) {
+		kfree(new_ldt);
+		return NULL;
+	}

-	paravirt_alloc_ldt(newldt, mincount);
-
-#ifdef CONFIG_X86_64
-	/* CHECKME: Do we really need this ? */
-	wmb();
-#endif
-	pc->ldt = newldt;
-	wmb();
-	pc->size = mincount;
-	wmb();
-
-	if (reload) {
-#ifdef CONFIG_SMP
-		preempt_disable();
-		load_LDT(pc);
-		if (!cpumask_equal(mm_cpumask(current->mm),
-				   cpumask_of(smp_processor_id())))
-			smp_call_function(flush_ldt, current->mm, 1);
-		preempt_enable();
-#else
-		load_LDT(pc);
-#endif
-	}
-	if (oldsize) {
-		paravirt_free_ldt(oldldt, oldsize);
-		if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
-			vfree(oldldt);
-		else
-			put_page(virt_to_page(oldldt));
-	}
-	return 0;
+	new_ldt->size = size;
+	return new_ldt;
 }

-static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
+/* After calling this, the LDT is immutable. */
+static void finalize_ldt_struct(struct ldt_struct *ldt)
 {
-	int err = alloc_ldt(new, old->size, 0);
-	int i;
-
-	if (err < 0)
-		return err;
+	paravirt_alloc_ldt(ldt->entries, ldt->size);
+}
+
+/* context.lock is held */
+static void install_ldt(struct mm_struct *current_mm,
+			struct ldt_struct *ldt)
+{
+	/* Synchronizes with lockless_dereference in load_mm_ldt. */
+	smp_store_release(&current_mm->context.ldt, ldt);
+
+	/* Activate the LDT for all CPUs using current_mm. */
+	on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
+}

-	for (i = 0; i < old->size; i++)
-		write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
-	return 0;
+static void free_ldt_struct(struct ldt_struct *ldt)
+{
+	if (likely(!ldt))
+		return;
+
+	paravirt_free_ldt(ldt->entries, ldt->size);
+	if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
+		vfree(ldt->entries);
+	else
+		kfree(ldt->entries);
+	kfree(ldt);
 }

 /*
@@ -104,17 +105,37 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
  */
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
+	struct ldt_struct *new_ldt;
 	struct mm_struct *old_mm;
 	int retval = 0;

 	mutex_init(&mm->context.lock);
-	mm->context.size = 0;
 	old_mm = current->mm;
-	if (old_mm && old_mm->context.size > 0) {
-		mutex_lock(&old_mm->context.lock);
-		retval = copy_ldt(&mm->context, &old_mm->context);
-		mutex_unlock(&old_mm->context.lock);
+	if (!old_mm) {
+		mm->context.ldt = NULL;
+		return 0;
 	}
+
+	mutex_lock(&old_mm->context.lock);
+	if (!old_mm->context.ldt) {
+		mm->context.ldt = NULL;
+		goto out_unlock;
+	}
+
+	new_ldt = alloc_ldt_struct(old_mm->context.ldt->size);
+	if (!new_ldt) {
+		retval = -ENOMEM;
+		goto out_unlock;
+	}
+
+	memcpy(new_ldt->entries, old_mm->context.ldt->entries,
+	       new_ldt->size * LDT_ENTRY_SIZE);
+	finalize_ldt_struct(new_ldt);
+
+	mm->context.ldt = new_ldt;
+
+out_unlock:
+	mutex_unlock(&old_mm->context.lock);
 	return retval;
 }

@@ -125,53 +146,47 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
  */
 void destroy_context(struct mm_struct *mm)
 {
-	if (mm->context.size) {
-#ifdef CONFIG_X86_32
-		/* CHECKME: Can this ever happen ? */
-		if (mm == current->active_mm)
-			clear_LDT();
-#endif
-		paravirt_free_ldt(mm->context.ldt, mm->context.size);
-		if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
-			vfree(mm->context.ldt);
-		else
-			put_page(virt_to_page(mm->context.ldt));
-		mm->context.size = 0;
-	}
+	free_ldt_struct(mm->context.ldt);
+	mm->context.ldt = NULL;
 }

 static int read_ldt(void __user *ptr, unsigned long bytecount)
 {
-	int err;
+	int retval;
 	unsigned long size;
 	struct mm_struct *mm = current->mm;

-	if (!mm->context.size)
-		return 0;
+	mutex_lock(&mm->context.lock);
+
+	if (!mm->context.ldt) {
+		retval = 0;
+		goto out_unlock;
+	}
+
 	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
 		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

-	mutex_lock(&mm->context.lock);
-	size = mm->context.size * LDT_ENTRY_SIZE;
+	size = mm->context.ldt->size * LDT_ENTRY_SIZE;
 	if (size > bytecount)
 		size = bytecount;

-	err = 0;
-	if (copy_to_user(ptr, mm->context.ldt, size))
-		err = -EFAULT;
-	mutex_unlock(&mm->context.lock);
-	if (err < 0)
-		goto error_return;
+	if (copy_to_user(ptr, mm->context.ldt->entries, size)) {
+		retval = -EFAULT;
+		goto out_unlock;
+	}
+
 	if (size != bytecount) {
-		/* zero-fill the rest */
-		if (clear_user(ptr + size, bytecount - size) != 0) {
-			err = -EFAULT;
-			goto error_return;
+		/* Zero-fill the rest and pretend we read bytecount bytes. */
+		if (clear_user(ptr + size, bytecount - size)) {
+			retval = -EFAULT;
+			goto out_unlock;
 		}
 	}
-	return bytecount;
-error_return:
-	return err;
+	retval = bytecount;
+
+out_unlock:
+	mutex_unlock(&mm->context.lock);
+	return retval;
 }

 static int read_default_ldt(void __user *ptr, unsigned long bytecount)
@@ -195,6 +210,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 	struct desc_struct ldt;
 	int error;
 	struct user_desc ldt_info;
+	int oldsize, newsize;
+	struct ldt_struct *new_ldt, *old_ldt;

 	error = -EINVAL;
 	if (bytecount != sizeof(ldt_info))
@@ -213,34 +230,39 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 		goto out;
 	}

-	mutex_lock(&mm->context.lock);
-	if (ldt_info.entry_number >= mm->context.size) {
-		error = alloc_ldt(&current->mm->context,
-				  ldt_info.entry_number + 1, 1);
-		if (error < 0)
-			goto out_unlock;
-	}
-
-	/* Allow LDTs to be cleared by the user. */
-	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
-		if (oldmode || LDT_empty(&ldt_info)) {
-			memset(&ldt, 0, sizeof(ldt));
-			goto install;
+	if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
+	    LDT_empty(&ldt_info)) {
+		/* The user wants to clear the entry. */
+		memset(&ldt, 0, sizeof(ldt));
+	} else {
+		if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
+			error = -EINVAL;
+			goto out;
 		}
+
+		fill_ldt(&ldt, &ldt_info);
+		if (oldmode)
+			ldt.avl = 0;
 	}

-	if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
-		error = -EINVAL;
+	mutex_lock(&mm->context.lock);
+
+	old_ldt = mm->context.ldt;
+	oldsize = old_ldt ? old_ldt->size : 0;
+	newsize = max((int)(ldt_info.entry_number + 1), oldsize);
+
+	error = -ENOMEM;
+	new_ldt = alloc_ldt_struct(newsize);
+	if (!new_ldt)
 		goto out_unlock;
-	}

-	fill_ldt(&ldt, &ldt_info);
-	if (oldmode)
-		ldt.avl = 0;
+	if (old_ldt)
+		memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
+	new_ldt->entries[ldt_info.entry_number] = ldt;
+	finalize_ldt_struct(new_ldt);

-	/* Install the new entry ...  */
-install:
-	write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
+	install_ldt(mm, new_ldt);
+	free_ldt_struct(old_ldt);
 	error = 0;

 out_unlock:

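From userspace, the rewritten path above is exercised through the raw modify_ldt(2) syscall; a minimal sketch (assumes x86 and glibc's syscall(2) wrapper; the function name is hypothetical):

#include <asm/ldt.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static long install_example_ldt_entry(void)
{
	struct user_desc d;

	memset(&d, 0, sizeof(d));
	d.entry_number = 0;		/* slot to (re)write */
	d.base_addr = 0;
	d.limit = 0xfffff;
	d.seg_32bit = 1;
	d.limit_in_pages = 1;

	/*
	 * func=1: write one entry. With the patch above the kernel now
	 * builds a whole new ldt_struct, copies the old entries, and
	 * installs it atomically instead of mutating the live table.
	 */
	return syscall(SYS_modify_ldt, 1, &d, sizeof(d));
}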

@@ -121,11 +121,11 @@ void __show_regs(struct pt_regs *regs, int all)
 void release_thread(struct task_struct *dead_task)
 {
 	if (dead_task->mm) {
-		if (dead_task->mm->context.size) {
+		if (dead_task->mm->context.ldt) {
 			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
 				dead_task->comm,
 				dead_task->mm->context.ldt,
-				dead_task->mm->context.size);
+				dead_task->mm->context.ldt->size);
 			BUG();
 		}
 	}


@@ -5,6 +5,7 @@
 #include <linux/mm.h>
 #include <linux/ptrace.h>
 #include <asm/desc.h>
+#include <asm/mmu_context.h>

 unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
 {
@@ -30,10 +31,11 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
 		seg &= ~7UL;

 		mutex_lock(&child->mm->context.lock);
-		if (unlikely((seg >> 3) >= child->mm->context.size))
+		if (unlikely(!child->mm->context.ldt ||
+			     (seg >> 3) >= child->mm->context.ldt->size))
 			addr = -1L; /* bogus selector, access would fault */
 		else {
-			desc = child->mm->context.ldt + seg;
+			desc = &child->mm->context.ldt->entries[seg];
 			base = get_desc_base(desc);

 			/* 16-bit code segment? */


@@ -672,16 +672,16 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
 	if (iter.mtrr_disabled)
 		return mtrr_disabled_type();

-	/* not contained in any MTRRs. */
-	if (type == -1)
-		return mtrr_default_type(mtrr_state);
-
 	/*
 	 * We just check one page, partially covered by MTRRs is
 	 * impossible.
 	 */
 	WARN_ON(iter.partial_map);

+	/* not contained in any MTRRs. */
+	if (type == -1)
+		return mtrr_default_type(mtrr_state);
+
 	return type;
 }
 EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type); EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);

View File

@@ -269,7 +269,7 @@ static void emit_bpf_tail_call(u8 **pprog)
 	EMIT4(0x48, 0x8B, 0x46,                   /* mov rax, qword ptr [rsi + 16] */
 	      offsetof(struct bpf_array, map.max_entries));
 	EMIT3(0x48, 0x39, 0xD0);                  /* cmp rax, rdx */
-#define OFFSET1 44 /* number of bytes to jump */
+#define OFFSET1 47 /* number of bytes to jump */
 	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
 	label1 = cnt;

@@ -278,15 +278,15 @@ static void emit_bpf_tail_call(u8 **pprog)
 	 */
 	EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
 	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
-#define OFFSET2 33
+#define OFFSET2 36
 	EMIT2(X86_JA, OFFSET2);                   /* ja out */
 	label2 = cnt;
 	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
 	EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */

 	/* prog = array->prog[index]; */
-	EMIT4(0x48, 0x8D, 0x44, 0xD6);            /* lea rax, [rsi + rdx * 8 + 0x50] */
-	EMIT1(offsetof(struct bpf_array, prog));
+	EMIT4_off32(0x48, 0x8D, 0x84, 0xD6,       /* lea rax, [rsi + rdx * 8 + offsetof(...)] */
+		    offsetof(struct bpf_array, prog));
 	EMIT3(0x48, 0x8B, 0x00);                  /* mov rax, qword ptr [rax] */

 	/* if (prog == NULL)

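The two jump offsets above move by exactly three bytes because the lea encoding grows from a disp8 to a disp32 form. A sketch of the arithmetic (byte values follow the standard x86 encoding; the old code hard-coded the 0x50 displacement in its comment):

/* old: 48 8D 44 D6 50           lea rax, [rsi + rdx*8 + 0x50]    (5 bytes)
 * new: 48 8D 84 D6 xx xx xx xx  lea rax, [rsi + rdx*8 + disp32]  (8 bytes)
 *
 * The instruction grows by 8 - 5 = 3 bytes, so every forward branch
 * that jumps over it must grow by the same 3:
 *   OFFSET1: 44 -> 47
 *   OFFSET2: 33 -> 36
 */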

@@ -972,6 +972,11 @@ u64 efi_mem_attributes(unsigned long phys_addr)

 static int __init arch_parse_efi_cmdline(char *str)
 {
+	if (!str) {
+		pr_warn("need at least one option\n");
+		return -EINVAL;
+	}
+
 	if (parse_option_str(str, "old_map"))
 		set_bit(EFI_OLD_MEMMAP, &efi.flags);
 	if (parse_option_str(str, "debug"))


@@ -22,6 +22,7 @@
 #include <asm/fpu/internal.h>
 #include <asm/debugreg.h>
 #include <asm/cpu.h>
+#include <asm/mmu_context.h>

 #ifdef CONFIG_X86_32
 __visible unsigned long saved_context_ebx;
@@ -153,7 +154,7 @@ static void fix_processor_context(void)
 	syscall_init();				/* This sets MSR_*STAR and related */
 #endif
 	load_TR_desc();				/* This does ltr */
-	load_LDT(&current->active_mm->context);	/* This does lldt */
+	load_mm_ldt(current->active_mm);	/* This does lldt */

 	fpu__resume_cpu();
 }


@@ -483,6 +483,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
 	pte_t pte;
 	unsigned long pfn;
 	struct page *page;
+	unsigned char dummy;

 	ptep = lookup_address((unsigned long)v, &level);
 	BUG_ON(ptep == NULL);
@@ -492,6 +493,32 @@ static void set_aliased_prot(void *v, pgprot_t prot)

 	pte = pfn_pte(pfn, prot);

+	/*
+	 * Careful: update_va_mapping() will fail if the virtual address
+	 * we're poking isn't populated in the page tables. We don't
+	 * need to worry about the direct map (that's always in the page
+	 * tables), but we need to be careful about vmap space. In
+	 * particular, the top level page table can lazily propagate
+	 * entries between processes, so if we've switched mms since we
+	 * vmapped the target in the first place, we might not have the
+	 * top-level page table entry populated.
+	 *
+	 * We disable preemption because we want the same mm active when
+	 * we probe the target and when we issue the hypercall. We'll
+	 * have the same nominal mm, but if we're a kernel thread, lazy
+	 * mm dropping could change our pgd.
+	 *
+	 * Out of an abundance of caution, this uses __get_user() to fault
+	 * in the target address just in case there's some obscure case
+	 * in which the target address isn't readable.
+	 */
+	preempt_disable();
+
+	pagefault_disable();	/* Avoid warnings due to being atomic. */
+	__get_user(dummy, (unsigned char __user __force *)v);
+	pagefault_enable();
+
 	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
 		BUG();

@@ -503,6 +530,8 @@ static void set_aliased_prot(void *v, pgprot_t prot)
 			BUG();
 	} else
 		kmap_flush_unused();
+
+	preempt_enable();
 }

 static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
@@ -510,6 +539,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
 	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
 	int i;

+	/*
+	 * We need to mark the all aliases of the LDT pages RO. We
+	 * don't need to call vm_flush_aliases(), though, since that's
+	 * only responsible for flushing aliases out the TLBs, not the
+	 * page tables, and Xen will flush the TLB for us if needed.
+	 *
+	 * To avoid confusing future readers: none of this is necessary
+	 * to load the LDT. The hypervisor only checks this when the
+	 * LDT is faulted in due to subsequent descriptor access.
+	 */
 	for(i = 0; i < entries; i += entries_per_page)
 		set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
 }

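The prefault idiom added to set_aliased_prot() above, reduced to its core (a minimal sketch under the same assumptions; the function name is hypothetical):

static void prefault_then_poke(void *v)
{
	unsigned char dummy;

	preempt_disable();	/* same mm for probe and operation */

	pagefault_disable();	/* the probe must not sleep or warn */
	__get_user(dummy, (unsigned char __user __force *)v);
	pagefault_enable();

	/*
	 * ... issue the operation that needs v populated in *this* mm's
	 * page tables, e.g. the update_va_mapping hypercall ...
	 */

	preempt_enable();
}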

@@ -231,7 +231,7 @@ int acpi_device_set_power(struct acpi_device *device, int state)
 		dev_warn(&device->dev, "Failed to change power state to %s\n",
 			 acpi_power_state_string(state));
 	} else {
-		device->power.state = state;
+		device->power.state = target_state;
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 				  "Device [%s] transitioned to %s\n",
 				  device->pnp.bus_id,


@@ -523,6 +523,7 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
 #  define rbd_assert(expr)	((void) 0)
 #endif /* !RBD_DEBUG */

+static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
 static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
@@ -1818,6 +1819,16 @@ static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
 	obj_request_done_set(obj_request);
 }

+static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
+{
+	dout("%s: obj %p\n", __func__, obj_request);
+
+	if (obj_request_img_data_test(obj_request))
+		rbd_osd_copyup_callback(obj_request);
+	else
+		obj_request_done_set(obj_request);
+}
+
 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
 				struct ceph_msg *msg)
 {
@@ -1866,6 +1877,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
 		rbd_osd_discard_callback(obj_request);
 		break;
 	case CEPH_OSD_OP_CALL:
+		rbd_osd_call_callback(obj_request);
+		break;
 	case CEPH_OSD_OP_NOTIFY_ACK:
 	case CEPH_OSD_OP_WATCH:
 		rbd_osd_trivial_callback(obj_request);
@@ -2530,13 +2543,15 @@ out_unwind:
 }

 static void
-rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
+rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
 {
 	struct rbd_img_request *img_request;
 	struct rbd_device *rbd_dev;
 	struct page **pages;
 	u32 page_count;

+	dout("%s: obj %p\n", __func__, obj_request);
+
 	rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
 		obj_request->type == OBJ_REQUEST_NODATA);
 	rbd_assert(obj_request_img_data_test(obj_request));
@@ -2563,9 +2578,7 @@ rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
 	if (!obj_request->result)
 		obj_request->xferred = obj_request->length;

-	/* Finish up with the normal image object callback */
-	rbd_img_obj_callback(obj_request);
+	obj_request_done_set(obj_request);
 }

 static void
@@ -2650,7 +2663,6 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)

 	/* All set, send it off. */

-	orig_request->callback = rbd_img_obj_copyup_callback;
 	osdc = &rbd_dev->rbd_client->client->osdc;
 	img_result = rbd_obj_request_submit(osdc, orig_request);
 	if (!img_result)

Some files were not shown because too many files have changed in this diff.