Merge branch 'master' into for-4.12/post-merge

This commit is contained in:
Jens Axboe 2017-04-24 22:03:14 -06:00
commit d9fd363a6c
668 changed files with 7418 additions and 4100 deletions

View File

@ -99,6 +99,8 @@ Linas Vepstas <linas@austin.ibm.com>
Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
Mark Brown <broonie@sirena.org.uk>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
Matthieu CASTET <castet.matthieu@free.fr>
Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@brturbo.com.br>
Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com>
@ -171,6 +173,7 @@ Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
Takashi YOSHII <takashi.yoshii.zj@renesas.com>
Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
Yusuke Goda <goda.yusuke@renesas.com>
Gustavo Padovan <gustavo@las.ic.unicamp.br>
Gustavo Padovan <padovan@profusion.mobi>

View File

@ -1725,6 +1725,12 @@
kernel and module base offset ASLR (Address Space
Layout Randomization).
kasan_multi_shot
[KNL] Force KASAN (Kernel Address Sanitizer) to print
a report on every invalid memory access. Without this
parameter, KASAN prints a report only for the first
invalid access.
keepinitrd [HW,ARM]
kernelcore= [KNL,X86,IA-64,PPC]

View File

@ -44,13 +44,19 @@ Hip05 Example (note that Hip06 is the same except compatible):
};
HiSilicon Hip06/Hip07 PCIe host bridge DT (almost-ECAM) description.
Some BIOSes place the host controller in a mode where it is ECAM
compliant for all devices other than the root complex. In such cases,
the host controller should be described as below.
The properties and their meanings are identical to those described in
host-generic-pci.txt except as listed below.
Properties of the host controller node that differ from
host-generic-pci.txt:
- compatible : Must be "hisilicon,pcie-almost-ecam"
- compatible : Must be "hisilicon,hip06-pcie-ecam", or
"hisilicon,hip07-pcie-ecam"
- reg : Two entries: First the ECAM configuration space for any
other bus underneath the root bus. Second, the base
@ -59,7 +65,7 @@ host-generic-pci.txt:
Example:
pcie0: pcie@a0090000 {
compatible = "hisilicon,pcie-almost-ecam";
compatible = "hisilicon,hip06-pcie-ecam";
reg = <0 0xb0000000 0 0x2000000>, /* ECAM configuration space */
<0 0xa0090000 0 0x10000>; /* host bridge registers */
bus-range = <0 31>;

View File

@ -12,7 +12,8 @@ Required properties:
- reg : Offset and length of the register set for the module
- interrupts : the interrupt number for the RNG module.
Used for "ti,omap4-rng" and "inside-secure,safexcel-eip76"
- clocks: the trng clock source
- clocks: the trng clock source. Only mandatory for the
"inside-secure,safexcel-eip76" compatible.
Example:
/* AM335x */

View File

@ -58,8 +58,7 @@ prototypes:
int (*permission) (struct inode *, int, unsigned int);
int (*get_acl)(struct inode *, int);
int (*setattr) (struct dentry *, struct iattr *);
int (*getattr) (const struct path *, struct dentry *, struct kstat *,
u32, unsigned int);
int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
void (*update_time)(struct inode *, struct timespec *, int);

View File

@ -600,3 +600,9 @@ in your dentry operations instead.
[recommended]
->readlink is optional for symlinks. Don't set it unless the filesystem
needs to fake something for readlink(2).
--
[mandatory]
->getattr() is now passed a struct path rather than a vfsmount and
dentry separately, and it now has request_mask and query_flags arguments
to specify the fields and sync type requested by statx. Filesystems not
supporting any statx-specific features may ignore the new arguments.
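
As a minimal sketch of a conversion to the new signature (a hypothetical
filesystem "foofs", not taken from this commit; a filesystem with no
statx-specific features can ignore the two new arguments):

	static int foofs_getattr(const struct path *path, struct kstat *stat,
				 u32 request_mask, unsigned int query_flags)
	{
		struct inode *inode = d_inode(path->dentry);

		/* request_mask/query_flags ignored: no statx-only features */
		generic_fillattr(inode, stat);
		return 0;
	}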

View File

@ -382,8 +382,7 @@ struct inode_operations {
int (*permission) (struct inode *, int);
int (*get_acl)(struct inode *, int);
int (*setattr) (struct dentry *, struct iattr *);
int (*getattr) (const struct path *, struct dentry *, struct kstat *,
u32, unsigned int);
int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
void (*update_time)(struct inode *, struct timespec *, int);
int (*atomic_open)(struct inode *, struct dentry *, struct file *,

View File

@ -77,9 +77,15 @@ static struct pinctrl_desc foo_desc = {
int __init foo_probe(void)
{
int error;
struct pinctrl_dev *pctl;
return pinctrl_register_and_init(&foo_desc, <PARENT>, NULL, &pctl);
error = pinctrl_register_and_init(&foo_desc, <PARENT>, NULL, &pctl);
if (error)
return error;
return pinctrl_enable(pctl);
}
To enable the pinctrl subsystem and the subgroups for PINMUX and PINCONF and

View File

@ -124,7 +124,7 @@ specified in the following format in the sign-off area:
.. code-block:: none
Cc: <stable@vger.kernel.org> # 3.3.x-
Cc: <stable@vger.kernel.org> # 3.3.x
The tag has the meaning of:

View File

@ -83,6 +83,12 @@ Groups:
Bits for undefined preemption levels are RAZ/WI.
For historical reasons, and to provide ABI compatibility with userspace, we
export the GICC_PMR register in the format of the GICH_VMCR.VMPriMask
field in the lower 5 bits of a word, meaning that userspace must always
use the lower 5 bits to communicate with the KVM device and must shift the
value left by 3 places to obtain the actual priority mask level.
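
As an illustration of the shift (our example; "uval" is a hypothetical
variable holding the word exchanged with the KVM device):

	u32 vmcr_pri_mask = uval & 0x1f;	/* 5-bit field used by the ABI  */
	u32 gicc_pmr = vmcr_pri_mask << 3;	/* actual priority mask level   */
	/* e.g. uval 0x1f corresponds to a GICC_PMR value of 0xf8 */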
Limitations:
- Priorities are not implemented, and registers are RAZ/WI
- Currently only implemented for KVM_DEV_TYPE_ARM_VGIC_V2.

View File

@ -2585,12 +2585,26 @@ F: include/uapi/linux/if_bonding.h
BPF (Safe dynamic programs and tools)
M: Alexei Starovoitov <ast@kernel.org>
M: Daniel Borkmann <daniel@iogearbox.net>
L: netdev@vger.kernel.org
L: linux-kernel@vger.kernel.org
S: Supported
F: arch/x86/net/bpf_jit*
F: Documentation/networking/filter.txt
F: include/linux/bpf*
F: include/linux/filter.h
F: include/uapi/linux/bpf*
F: include/uapi/linux/filter.h
F: kernel/bpf/
F: tools/testing/selftests/bpf/
F: kernel/trace/bpf_trace.c
F: lib/test_bpf.c
F: net/bpf/
F: net/core/filter.c
F: net/sched/act_bpf.c
F: net/sched/cls_bpf.c
F: samples/bpf/
F: tools/net/bpf*
F: tools/testing/selftests/bpf/
BROADCOM B44 10/100 ETHERNET DRIVER
M: Michael Chan <michael.chan@broadcom.com>
@ -4117,14 +4131,13 @@ F: drivers/block/drbd/
F: lib/lru_cache.c
F: Documentation/blockdev/drbd/
DRIVER CORE, KOBJECTS, DEBUGFS, KERNFS AND SYSFS
DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
S: Supported
F: Documentation/kobject.txt
F: drivers/base/
F: fs/debugfs/
F: fs/kernfs/
F: fs/sysfs/
F: include/linux/debugfs.h
F: include/linux/kobj*
@ -4928,6 +4941,7 @@ F: include/linux/netfilter_bridge/
F: net/bridge/
ETHERNET PHY LIBRARY
M: Andrew Lunn <andrew@lunn.ch>
M: Florian Fainelli <f.fainelli@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
@ -7089,9 +7103,9 @@ S: Maintained
F: fs/autofs4/
KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
M: Masahiro Yamada <yamada.masahiro@socionext.com>
M: Michal Marek <mmarek@suse.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git for-next
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git rc-fixes
T: git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git
L: linux-kbuild@vger.kernel.org
S: Maintained
F: Documentation/kbuild/
@ -7208,6 +7222,14 @@ F: arch/mips/include/uapi/asm/kvm*
F: arch/mips/include/asm/kvm*
F: arch/mips/kvm/
KERNFS
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
M: Tejun Heo <tj@kernel.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
S: Supported
F: include/linux/kernfs.h
F: fs/kernfs/
KEXEC
M: Eric Biederman <ebiederm@xmission.com>
W: http://kernel.org/pub/linux/utils/kernel/kexec/
@ -8753,6 +8775,7 @@ W: http://www.linuxfoundation.org/en/Net
Q: http://patchwork.ozlabs.org/project/netdev/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
B: mailto:netdev@vger.kernel.org
S: Maintained
F: net/
F: include/net/
@ -10814,6 +10837,7 @@ F: drivers/s390/block/dasd*
F: block/partitions/ibm.c
S390 NETWORK DRIVERS
M: Julian Wiedmann <jwi@linux.vnet.ibm.com>
M: Ursula Braun <ubraun@linux.vnet.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
@ -10844,6 +10868,7 @@ S: Supported
F: drivers/s390/scsi/zfcp_*
S390 IUCV NETWORK LAYER
M: Julian Wiedmann <jwi@linux.vnet.ibm.com>
M: Ursula Braun <ubraun@linux.vnet.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
@ -12454,7 +12479,6 @@ F: drivers/clk/ti/
F: include/linux/clk/ti.h
TI ETHERNET SWITCH DRIVER (CPSW)
M: Mugunthan V N <mugunthanvnm@ti.com>
R: Grygorii Strashko <grygorii.strashko@ti.com>
L: linux-omap@vger.kernel.org
L: netdev@vger.kernel.org
@ -13295,7 +13319,7 @@ F: drivers/virtio/
F: tools/virtio/
F: drivers/net/virtio_net.c
F: drivers/block/virtio_blk.c
F: include/linux/virtio_*.h
F: include/linux/virtio*.h
F: include/uapi/linux/virtio_*.h
F: drivers/crypto/virtio/

View File

@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 11
SUBLEVEL = 0
EXTRAVERSION = -rc4
EXTRAVERSION = -rc8
NAME = Fearless Coyote
# *DOCUMENTATION*
@ -372,7 +372,7 @@ LDFLAGS_MODULE =
CFLAGS_KERNEL =
AFLAGS_KERNEL =
LDFLAGS_vmlinux =
CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized
CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,)
CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,)
@ -653,6 +653,12 @@ KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \
# Tell gcc never to replace a conditional load with a non-conditional one
KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
# check for 'asm goto'
ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
endif
include scripts/Makefile.gcc-plugins
ifdef CONFIG_READABLE_ASM
@ -798,12 +804,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types)
# use the deterministic mode of AR if available
KBUILD_ARFLAGS := $(call ar-option,D)
# check for 'asm goto'
ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
endif
include scripts/Makefile.kasan
include scripts/Makefile.extrawarn
include scripts/Makefile.ubsan

View File

@ -1290,7 +1290,7 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
/* copy relevant bits of struct timex. */
if (copy_from_user(&txc, txc_p, offsetof(struct timex32, time)) ||
copy_from_user(&txc.tick, &txc_p->tick, sizeof(struct timex32) -
offsetof(struct timex32, time)))
offsetof(struct timex32, tick)))
return -EFAULT;
ret = do_adjtimex(&txc);
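
To see why the old length was wrong, consider the layout the two-stage copy
assumes (a sketch, relying only on tick directly following time in
struct timex32):

	/* First copy:  bytes [0, offsetof(struct timex32, time))           */
	/* Second copy: starts at &txc_p->tick, so its length must be
	 *   sizeof(struct timex32) - offsetof(struct timex32, tick).
	 * Using offsetof(..., time) for the length over-sizes the copy by
	 * sizeof(txc_p->time), reading past the end of *txc_p and writing
	 * past the end of the on-stack txc. */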

View File

@ -26,6 +26,7 @@
device_type = "cpu";
compatible = "snps,arc770d";
reg = <0>;
clocks = <&core_clk>;
};
};

View File

@ -21,6 +21,7 @@
device_type = "cpu";
compatible = "snps,archs38";
reg = <0>;
clocks = <&core_clk>;
};
};

View File

@ -19,8 +19,27 @@
cpu@0 {
device_type = "cpu";
compatible = "snps,archs38xN";
compatible = "snps,archs38";
reg = <0>;
clocks = <&core_clk>;
};
cpu@1 {
device_type = "cpu";
compatible = "snps,archs38";
reg = <1>;
clocks = <&core_clk>;
};
cpu@2 {
device_type = "cpu";
compatible = "snps,archs38";
reg = <2>;
clocks = <&core_clk>;
};
cpu@3 {
device_type = "cpu";
compatible = "snps,archs38";
reg = <3>;
clocks = <&core_clk>;
};
};

View File

@ -112,13 +112,19 @@
interrupts = <7>;
bus-width = <4>;
};
};
/* Embedded Vision subsystem UIO mappings; only relevant for EV VDK */
uio_ev: uio@0xD0000000 {
compatible = "generic-uio";
reg = <0xD0000000 0x2000 0xD1000000 0x2000 0x90000000 0x10000000 0xC0000000 0x10000000>;
reg-names = "ev_gsa", "ev_ctrl", "ev_shared_mem", "ev_code_mem";
interrupts = <23>;
};
/*
* Embedded Vision subsystem UIO mappings; only relevant for EV VDK
*
* This node is intentionally put outside of MB above because
* it maps areas outside of MB's 0xEz-0xFz.
*/
uio_ev: uio@0xD0000000 {
compatible = "generic-uio";
reg = <0xD0000000 0x2000 0xD1000000 0x2000 0x90000000 0x10000000 0xC0000000 0x10000000>;
reg-names = "ev_gsa", "ev_ctrl", "ev_shared_mem", "ev_code_mem";
interrupt-parent = <&mb_intc>;
interrupts = <23>;
};
};

View File

@ -54,9 +54,7 @@ int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause);
void kretprobe_trampoline(void);
void trap_is_kprobe(unsigned long address, struct pt_regs *regs);
#else
static void trap_is_kprobe(unsigned long address, struct pt_regs *regs)
{
}
#define trap_is_kprobe(address, regs)
#endif /* CONFIG_KPROBES */
#endif /* _ARC_KPROBES_H */

View File

@ -100,15 +100,21 @@ END(handle_interrupt)
;################### Non TLB Exception Handling #############################
ENTRY(EV_SWI)
flag 1
; TODO: implement this
EXCEPTION_PROLOGUE
b ret_from_exception
END(EV_SWI)
ENTRY(EV_DivZero)
flag 1
; TODO: implement this
EXCEPTION_PROLOGUE
b ret_from_exception
END(EV_DivZero)
ENTRY(EV_DCError)
flag 1
; TODO: implement this
EXCEPTION_PROLOGUE
b ret_from_exception
END(EV_DCError)
; ---------------------------------------------

View File

@ -10,6 +10,7 @@
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/root_dev.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clocksource.h>
#include <linux/console.h>
@ -488,8 +489,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
{
char *str;
int cpu_id = ptr_to_cpu(v);
struct device_node *core_clk = of_find_node_by_name(NULL, "core_clk");
u32 freq = 0;
struct device *cpu_dev = get_cpu_device(cpu_id);
struct clk *cpu_clk;
unsigned long freq = 0;
if (!cpu_online(cpu_id)) {
seq_printf(m, "processor [%d]\t: Offline\n", cpu_id);
@ -502,9 +504,15 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE));
of_property_read_u32(core_clk, "clock-frequency", &freq);
cpu_clk = clk_get(cpu_dev, NULL);
if (IS_ERR(cpu_clk)) {
seq_printf(m, "CPU speed \t: Cannot get clock for processor [%d]\n",
cpu_id);
} else {
freq = clk_get_rate(cpu_clk);
}
if (freq)
seq_printf(m, "CPU speed\t: %u.%02u Mhz\n",
seq_printf(m, "CPU speed\t: %lu.%02lu Mhz\n",
freq / 1000000, (freq / 10000) % 100);
seq_printf(m, "Bogo MIPS\t: %lu.%02lu\n",

View File

@ -633,6 +633,9 @@ noinline static void slc_entire_op(const int op)
write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
/* Make sure "busy" bit reports correct stataus, see STAR 9001165532 */
read_aux_reg(r);
/* Important to wait for flush to complete */
while (read_aux_reg(r) & SLC_CTRL_BUSY);
}

View File

@ -371,6 +371,8 @@
phy1: ethernet-phy@1 {
reg = <7>;
eee-broken-100tx;
eee-broken-1000t;
};
};

View File

@ -672,6 +672,7 @@
ti,non-removable;
bus-width = <4>;
cap-power-off-card;
keep-power-in-suspend;
pinctrl-names = "default";
pinctrl-0 = <&mmc2_pins>;

View File

@ -283,6 +283,7 @@
device_type = "pci";
ranges = <0x81000000 0 0 0x03000 0 0x00010000
0x82000000 0 0x20013000 0x13000 0 0xffed000>;
bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;
linux,pci-domain = <0>;
@ -319,6 +320,7 @@
device_type = "pci";
ranges = <0x81000000 0 0 0x03000 0 0x00010000
0x82000000 0 0x30013000 0x13000 0 0xffed000>;
bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;
linux,pci-domain = <1>;

View File

@ -121,7 +121,7 @@
&i2c3 {
clock-frequency = <400000>;
at24@50 {
compatible = "at24,24c02";
compatible = "atmel,24c64";
readonly;
reg = <0x50>;
};

View File

@ -66,12 +66,6 @@
opp-microvolt = <1200000>;
clock-latency-ns = <244144>; /* 8 32k periods */
};
opp@1200000000 {
opp-hz = /bits/ 64 <1200000000>;
opp-microvolt = <1320000>;
clock-latency-ns = <244144>; /* 8 32k periods */
};
};
cpus {
@ -81,16 +75,22 @@
operating-points-v2 = <&cpu0_opp_table>;
};
cpu@1 {
operating-points-v2 = <&cpu0_opp_table>;
};
cpu@2 {
compatible = "arm,cortex-a7";
device_type = "cpu";
reg = <2>;
operating-points-v2 = <&cpu0_opp_table>;
};
cpu@3 {
compatible = "arm,cortex-a7";
device_type = "cpu";
reg = <3>;
operating-points-v2 = <&cpu0_opp_table>;
};
};
@ -113,8 +113,8 @@
simple-audio-card,mclk-fs = <512>;
simple-audio-card,aux-devs = <&codec_analog>;
simple-audio-card,routing =
"Left DAC", "Digital Left DAC",
"Right DAC", "Digital Right DAC";
"Left DAC", "AIF1 Slot 0 Left",
"Right DAC", "AIF1 Slot 0 Right";
status = "disabled";
simple-audio-card,cpu {

View File

@ -1124,6 +1124,9 @@ static void cpu_hyp_reinit(void)
if (__hyp_get_vectors() == hyp_default_vectors)
cpu_init_hyp_mode(NULL);
}
if (vgic_present)
kvm_vgic_init_cpu_hardware();
}
static void cpu_hyp_reset(void)

View File

@ -292,11 +292,18 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
phys_addr_t addr = start, end = start + size;
phys_addr_t next;
assert_spin_locked(&kvm->mmu_lock);
pgd = kvm->arch.pgd + stage2_pgd_index(addr);
do {
next = stage2_pgd_addr_end(addr, end);
if (!stage2_pgd_none(*pgd))
unmap_stage2_puds(kvm, pgd, addr, next);
/*
* If the range is too large, release the kvm->mmu_lock
* to prevent starvation and lockup detector warnings.
*/
if (next != end)
cond_resched_lock(&kvm->mmu_lock);
} while (pgd++, addr = next, addr != end);
}
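
The cond_resched_lock() above is an instance of a general idiom for long
walks under a spinlock; a minimal sketch (hypothetical lock, range and
process_range(), not this function):

	spin_lock(&lock);
	while (addr != end) {
		next = min(addr + BATCH, end);
		process_range(addr, next);
		/* Drop the lock between batches, except after the last
		 * one, so waiters and the lockup detector are not
		 * starved. */
		if (next != end)
			cond_resched_lock(&lock);
		addr = next;
	}
	spin_unlock(&lock);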
@ -803,6 +810,7 @@ void stage2_unmap_vm(struct kvm *kvm)
int idx;
idx = srcu_read_lock(&kvm->srcu);
down_read(&current->mm->mmap_sem);
spin_lock(&kvm->mmu_lock);
slots = kvm_memslots(kvm);
@ -810,6 +818,7 @@ void stage2_unmap_vm(struct kvm *kvm)
stage2_unmap_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
up_read(&current->mm->mmap_sem);
srcu_read_unlock(&kvm->srcu, idx);
}
@ -829,7 +838,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
if (kvm->arch.pgd == NULL)
return;
spin_lock(&kvm->mmu_lock);
unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
spin_unlock(&kvm->mmu_lock);
/* Free the HW pgd, one page at a time */
free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
kvm->arch.pgd = NULL;
@ -1801,6 +1813,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
(KVM_PHYS_SIZE >> PAGE_SHIFT))
return -EFAULT;
down_read(&current->mm->mmap_sem);
/*
* A memory region could potentially cover multiple VMAs, and any holes
* between them, so iterate over all of them to find out if we can map
@ -1844,8 +1857,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
pa += vm_start - vma->vm_start;
/* IO region dirty page logging not allowed */
if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
return -EINVAL;
if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
ret = -EINVAL;
goto out;
}
ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
vm_end - vm_start,
@ -1857,7 +1872,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
} while (hva < reg_end);
if (change == KVM_MR_FLAGS_ONLY)
return ret;
goto out;
spin_lock(&kvm->mmu_lock);
if (ret)
@ -1865,6 +1880,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
else
stage2_flush_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
out:
up_read(&current->mm->mmap_sem);
return ret;
}

View File

@ -270,6 +270,7 @@ extern const struct smp_operations omap4_smp_ops;
extern int omap4_mpuss_init(void);
extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state);
extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state);
extern u32 omap4_get_cpu1_ns_pa_addr(void);
#else
static inline int omap4_enter_lowpower(unsigned int cpu,
unsigned int power_state)

View File

@ -50,7 +50,7 @@ void omap4_cpu_die(unsigned int cpu)
omap4_hotplug_cpu(cpu, PWRDM_POWER_OFF);
if (omap_secure_apis_support())
boot_cpu = omap_read_auxcoreboot0();
boot_cpu = omap_read_auxcoreboot0() >> 9;
else
boot_cpu =
readl_relaxed(base + OMAP_AUX_CORE_BOOT_0) >> 5;

View File

@ -64,6 +64,7 @@
#include "prm-regbits-44xx.h"
static void __iomem *sar_base;
static u32 old_cpu1_ns_pa_addr;
#if defined(CONFIG_PM) && defined(CONFIG_SMP)
@ -212,6 +213,11 @@ static void __init save_l2x0_context(void)
{}
#endif
u32 omap4_get_cpu1_ns_pa_addr(void)
{
return old_cpu1_ns_pa_addr;
}
/**
* omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function
* The purpose of this function is to manage low power programming
@ -460,22 +466,30 @@ int __init omap4_mpuss_init(void)
void __init omap4_mpuss_early_init(void)
{
unsigned long startup_pa;
void __iomem *ns_pa_addr;
if (!(cpu_is_omap44xx() || soc_is_omap54xx()))
if (!(soc_is_omap44xx() || soc_is_omap54xx()))
return;
sar_base = omap4_get_sar_ram_base();
if (cpu_is_omap443x())
/* Save old NS_PA_ADDR for validity checks later on */
if (soc_is_omap44xx())
ns_pa_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
else
ns_pa_addr = sar_base + OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
old_cpu1_ns_pa_addr = readl_relaxed(ns_pa_addr);
if (soc_is_omap443x())
startup_pa = __pa_symbol(omap4_secondary_startup);
else if (cpu_is_omap446x())
else if (soc_is_omap446x())
startup_pa = __pa_symbol(omap4460_secondary_startup);
else if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
startup_pa = __pa_symbol(omap5_secondary_hyp_startup);
else
startup_pa = __pa_symbol(omap5_secondary_startup);
if (cpu_is_omap44xx())
if (soc_is_omap44xx())
writel_relaxed(startup_pa, sar_base +
CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
else

View File

@ -94,6 +94,5 @@ ENTRY(omap_read_auxcoreboot0)
ldr r12, =0x103
dsb
smc #0
mov r0, r0, lsr #9
ldmfd sp!, {r2-r12, pc}
ENDPROC(omap_read_auxcoreboot0)

View File

@ -21,6 +21,7 @@
#include <linux/io.h>
#include <linux/irqchip/arm-gic.h>
#include <asm/sections.h>
#include <asm/smp_scu.h>
#include <asm/virt.h>
@ -40,10 +41,14 @@
#define OMAP5_CORE_COUNT 0x2
#define AUX_CORE_BOOT0_GP_RELEASE 0x020
#define AUX_CORE_BOOT0_HS_RELEASE 0x200
struct omap_smp_config {
unsigned long cpu1_rstctrl_pa;
void __iomem *cpu1_rstctrl_va;
void __iomem *scu_base;
void __iomem *wakeupgen_base;
void *startup_addr;
};
@ -140,7 +145,6 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
static struct clockdomain *cpu1_clkdm;
static bool booted;
static struct powerdomain *cpu1_pwrdm;
void __iomem *base = omap_get_wakeupgen_base();
/*
* Set synchronisation state between this boot processor
@ -155,9 +159,11 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
* A barrier is added to ensure that write buffer is drained
*/
if (omap_secure_apis_support())
omap_modify_auxcoreboot0(0x200, 0xfffffdff);
omap_modify_auxcoreboot0(AUX_CORE_BOOT0_HS_RELEASE,
0xfffffdff);
else
writel_relaxed(0x20, base + OMAP_AUX_CORE_BOOT_0);
writel_relaxed(AUX_CORE_BOOT0_GP_RELEASE,
cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
if (!cpu1_clkdm && !cpu1_pwrdm) {
cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
@ -261,9 +267,72 @@ static void __init omap4_smp_init_cpus(void)
set_cpu_possible(i, true);
}
/*
* For now, just make sure the start-up address is not within the booting
* kernel space as that means we just overwrote whatever secondary_startup()
* code there was.
*/
static bool __init omap4_smp_cpu1_startup_valid(unsigned long addr)
{
if ((addr >= __pa(PAGE_OFFSET)) && (addr <= __pa(__bss_start)))
return false;
return true;
}
/*
* We may need to reset CPU1 before configuring it; otherwise a kexec boot
* can end up trying to use the old kernel's startup address, or
* suspend-resume will occasionally fail to bring up CPU1 on 4430 if
* CPU1 fails to enter deeper idle states.
*/
static void __init omap4_smp_maybe_reset_cpu1(struct omap_smp_config *c)
{
unsigned long cpu1_startup_pa, cpu1_ns_pa_addr;
bool needs_reset = false;
u32 released;
if (omap_secure_apis_support())
released = omap_read_auxcoreboot0() & AUX_CORE_BOOT0_HS_RELEASE;
else
released = readl_relaxed(cfg.wakeupgen_base +
OMAP_AUX_CORE_BOOT_0) &
AUX_CORE_BOOT0_GP_RELEASE;
if (released) {
pr_warn("smp: CPU1 not parked?\n");
return;
}
cpu1_startup_pa = readl_relaxed(cfg.wakeupgen_base +
OMAP_AUX_CORE_BOOT_1);
cpu1_ns_pa_addr = omap4_get_cpu1_ns_pa_addr();
/* Did the configured secondary_startup() get overwritten? */
if (!omap4_smp_cpu1_startup_valid(cpu1_startup_pa))
needs_reset = true;
/*
* If omap4 or 5 has NS_PA_ADDR configured, CPU1 may be in a
* deeper idle state in WFI and will wake to an invalid address.
*/
if ((soc_is_omap44xx() || soc_is_omap54xx()) &&
!omap4_smp_cpu1_startup_valid(cpu1_ns_pa_addr))
needs_reset = true;
if (!needs_reset || !c->cpu1_rstctrl_va)
return;
pr_info("smp: CPU1 parked within kernel, needs reset (0x%lx 0x%lx)\n",
cpu1_startup_pa, cpu1_ns_pa_addr);
writel_relaxed(1, c->cpu1_rstctrl_va);
readl_relaxed(c->cpu1_rstctrl_va);
writel_relaxed(0, c->cpu1_rstctrl_va);
}
static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
{
void __iomem *base = omap_get_wakeupgen_base();
const struct omap_smp_config *c = NULL;
if (soc_is_omap443x())
@ -281,6 +350,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
/* Must preserve cfg.scu_base set earlier */
cfg.cpu1_rstctrl_pa = c->cpu1_rstctrl_pa;
cfg.startup_addr = c->startup_addr;
cfg.wakeupgen_base = omap_get_wakeupgen_base();
if (soc_is_dra74x() || soc_is_omap54xx()) {
if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
@ -299,15 +369,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
if (cfg.scu_base)
scu_enable(cfg.scu_base);
/*
* Reset CPU1 before configuring, otherwise kexec will
* end up trying to use old kernel startup address.
*/
if (cfg.cpu1_rstctrl_va) {
writel_relaxed(1, cfg.cpu1_rstctrl_va);
readl_relaxed(cfg.cpu1_rstctrl_va);
writel_relaxed(0, cfg.cpu1_rstctrl_va);
}
omap4_smp_maybe_reset_cpu1(&cfg);
/*
* Write the address of secondary startup routine into the
@ -319,7 +381,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
omap_auxcoreboot_addr(__pa_symbol(cfg.startup_addr));
else
writel_relaxed(__pa_symbol(cfg.startup_addr),
base + OMAP_AUX_CORE_BOOT_1);
cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
}
const struct smp_operations omap4_smp_ops __initconst = {

View File

@ -222,6 +222,14 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
dev_err(dev, "failed to idle\n");
}
break;
case BUS_NOTIFY_BIND_DRIVER:
od = to_omap_device(pdev);
if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) &&
pm_runtime_status_suspended(dev)) {
od->_driver_status = BUS_NOTIFY_BIND_DRIVER;
pm_runtime_set_active(dev);
}
break;
case BUS_NOTIFY_ADD_DEVICE:
if (pdev->dev.of_node)
omap_device_build_from_dt(pdev);

View File

@ -6,6 +6,7 @@ menuconfig ARCH_ORION5X
select GPIOLIB
select MVEBU_MBUS
select PCI
select PHYLIB if NETDEVICES
select PLAT_ORION_LEGACY
help
Support for the following Marvell Orion 5x series SoCs:

View File

@ -935,13 +935,31 @@ static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_add
__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}
/*
* The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
* that the intention is to allow exporting memory allocated via the
* coherent DMA APIs through the dma_buf API, which only accepts a
* scatter-gather table (sg_table). This presents a couple of problems:
* 1. Not all memory allocated via the coherent DMA APIs is backed by
* a struct page
* 2. Passing coherent DMA memory into the streaming APIs is not allowed
* as we will try to flush the memory through a different alias to that
* actually being used (and the flushes are redundant).
*/
int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t handle, size_t size,
unsigned long attrs)
{
struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
unsigned long pfn = dma_to_pfn(dev, handle);
struct page *page;
int ret;
/* If the PFN is not valid, we do not have a struct page */
if (!pfn_valid(pfn))
return -ENXIO;
page = pfn_to_page(pfn);
ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
if (unlikely(ret))
return ret;

View File

@ -303,7 +303,10 @@ static inline void set_vbar(unsigned long val)
*/
static inline bool security_extensions_enabled(void)
{
return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
/* Check CPUID Identification Scheme before ID_PFR1 read */
if ((read_cpuid_id() & 0x000f0000) == 0x000f0000)
return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
return 0;
}
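
A concrete check (our example, not from the patch): a Cortex-A7 reports
MIDR 0x410fc075, and 0x410fc075 & 0x000f0000 == 0x000f0000, so the CPU
uses the new CPUID identification scheme, ID_PFR1 is architecturally
present, and its bits [7:4] (extracted above) report the Security
extensions.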
static unsigned long __init setup_vectors_base(void)

View File

@ -468,6 +468,7 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
eth_data, &orion_ge11);
}
#ifdef CONFIG_ARCH_ORION5X
/*****************************************************************************
* Ethernet switch
****************************************************************************/
@ -480,6 +481,9 @@ void __init orion_ge00_switch_init(struct dsa_chip_data *d)
struct mdio_board_info *bd;
unsigned int i;
if (!IS_BUILTIN(CONFIG_PHYLIB))
return;
for (i = 0; i < ARRAY_SIZE(d->port_names); i++)
if (!strcmp(d->port_names[i], "cpu"))
break;
@ -493,6 +497,7 @@ void __init orion_ge00_switch_init(struct dsa_chip_data *d)
mdiobus_register_board_info(&orion_ge00_switch_board_info, 1);
}
#endif
/*****************************************************************************
* I2C

View File

@ -266,11 +266,20 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
#endif
if (p) {
if (cur) {
if (!p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
/*
* Probe hit but conditional execution check failed,
* so just skip the instruction and continue as if
* nothing had happened.
* In this case, we can skip the recursion check too.
*/
singlestep_skip(p, regs);
} else if (cur) {
/* Kprobe is pending, so we're recursing. */
switch (kcb->kprobe_status) {
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
case KPROBE_HIT_SS:
/* A pre- or post-handler probe got us here. */
kprobes_inc_nmissed_count(p);
save_previous_kprobe(kcb);
@ -279,11 +288,16 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
singlestep(p, regs, kcb);
restore_previous_kprobe(kcb);
break;
case KPROBE_REENTER:
/* A nested probe was hit in FIQ, it is a BUG */
pr_warn("Unrecoverable kprobe detected at %p.\n",
p->addr);
/* fall through */
default:
/* impossible cases */
BUG();
}
} else if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
} else {
/* Probe hit and conditional execution check ok. */
set_current_kprobe(p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
@ -304,13 +318,6 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
}
reset_current_kprobe();
}
} else {
/*
* Probe hit but conditional execution check failed,
* so just skip the instruction and continue as if
* nothing had happened.
*/
singlestep_skip(p, regs);
}
} else if (cur) {
/* We probably hit a jprobe. Call its break handler. */
@ -434,6 +441,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
struct hlist_node *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
kprobe_opcode_t *correct_ret_addr = NULL;
INIT_HLIST_HEAD(&empty_rp);
kretprobe_hash_lock(current, &head, &flags);
@ -456,15 +464,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
/* another task is sharing our hash bucket */
continue;
if (ri->rp && ri->rp->handler) {
__this_cpu_write(current_kprobe, &ri->rp->kp);
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
ri->rp->handler(ri, regs);
__this_cpu_write(current_kprobe, NULL);
}
orig_ret_address = (unsigned long)ri->ret_addr;
recycle_rp_inst(ri, &empty_rp);
if (orig_ret_address != trampoline_address)
/*
@ -476,6 +476,33 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
}
kretprobe_assert(ri, orig_ret_address, trampoline_address);
correct_ret_addr = ri->ret_addr;
hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
orig_ret_address = (unsigned long)ri->ret_addr;
if (ri->rp && ri->rp->handler) {
__this_cpu_write(current_kprobe, &ri->rp->kp);
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
ri->ret_addr = correct_ret_addr;
ri->rp->handler(ri, regs);
__this_cpu_write(current_kprobe, NULL);
}
recycle_rp_inst(ri, &empty_rp);
if (orig_ret_address != trampoline_address)
/*
* This is the real return address. Any other
* instances associated with this task are for
* other calls deeper on the call stack
*/
break;
}
kretprobe_hash_unlock(current, &flags);
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {

View File

@ -977,7 +977,10 @@ static void coverage_end(void)
void __naked __kprobes_test_case_start(void)
{
__asm__ __volatile__ (
"stmdb sp!, {r4-r11} \n\t"
"mov r2, sp \n\t"
"bic r3, r2, #7 \n\t"
"mov sp, r3 \n\t"
"stmdb sp!, {r2-r11} \n\t"
"sub sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
"bic r0, lr, #1 @ r0 = inline data \n\t"
"mov r1, sp \n\t"
@ -997,7 +1000,8 @@ void __naked __kprobes_test_case_end_32(void)
"movne pc, r0 \n\t"
"mov r0, r4 \n\t"
"add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
"ldmia sp!, {r4-r11} \n\t"
"ldmia sp!, {r2-r11} \n\t"
"mov sp, r2 \n\t"
"mov pc, r0 \n\t"
);
}
@ -1013,7 +1017,8 @@ void __naked __kprobes_test_case_end_16(void)
"bxne r0 \n\t"
"mov r0, r4 \n\t"
"add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
"ldmia sp!, {r4-r11} \n\t"
"ldmia sp!, {r2-r11} \n\t"
"mov sp, r2 \n\t"
"bx r0 \n\t"
);
}

View File

@ -179,8 +179,10 @@
usbphy: phy@01c19400 {
compatible = "allwinner,sun50i-a64-usb-phy";
reg = <0x01c19400 0x14>,
<0x01c1a800 0x4>,
<0x01c1b800 0x4>;
reg-names = "phy_ctrl",
"pmu0",
"pmu1";
clocks = <&ccu CLK_USB_PHY0>,
<&ccu CLK_USB_PHY1>;

View File

@ -3,8 +3,6 @@
#include <linux/compiler.h>
#include <asm/sysreg.h>
#ifndef __ASSEMBLY__
struct task_struct;

View File

@ -944,7 +944,7 @@ static bool have_cpu_die(void)
#ifdef CONFIG_HOTPLUG_CPU
int any_cpu = raw_smp_processor_id();
if (cpu_ops[any_cpu]->cpu_die)
if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die)
return true;
#endif
return false;

View File

@ -1,2 +1 @@
vdso.lds
vdso-offsets.h

View File

@ -42,7 +42,20 @@
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
static const char *fault_name(unsigned int esr);
struct fault_info {
int (*fn)(unsigned long addr, unsigned int esr,
struct pt_regs *regs);
int sig;
int code;
const char *name;
};
static const struct fault_info fault_info[];
static inline const struct fault_info *esr_to_fault_info(unsigned int esr)
{
return fault_info + (esr & 63);
}
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
@ -197,10 +210,12 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
struct pt_regs *regs)
{
struct siginfo si;
const struct fault_info *inf;
if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) {
inf = esr_to_fault_info(esr);
pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
tsk->comm, task_pid_nr(tsk), inf->name, sig,
addr, esr);
show_pte(tsk->mm, addr);
show_regs(regs);
@ -219,14 +234,16 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
{
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->active_mm;
const struct fault_info *inf;
/*
* If we are in kernel mode at this point, we have no context to
* handle this fault with.
*/
if (user_mode(regs))
__do_user_fault(tsk, addr, esr, SIGSEGV, SEGV_MAPERR, regs);
else
if (user_mode(regs)) {
inf = esr_to_fault_info(esr);
__do_user_fault(tsk, addr, esr, inf->sig, inf->code, regs);
} else
__do_kernel_fault(mm, addr, esr, regs);
}
@ -488,12 +505,7 @@ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
return 1;
}
static const struct fault_info {
int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
int sig;
int code;
const char *name;
} fault_info[] = {
static const struct fault_info fault_info[] = {
{ do_bad, SIGBUS, 0, "ttbr address size fault" },
{ do_bad, SIGBUS, 0, "level 1 address size fault" },
{ do_bad, SIGBUS, 0, "level 2 address size fault" },
@ -560,19 +572,13 @@ static const struct fault_info {
{ do_bad, SIGBUS, 0, "unknown 63" },
};
static const char *fault_name(unsigned int esr)
{
const struct fault_info *inf = fault_info + (esr & 63);
return inf->name;
}
/*
* Dispatch a data abort to the relevant handler.
*/
asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
const struct fault_info *inf = fault_info + (esr & 63);
const struct fault_info *inf = esr_to_fault_info(esr);
struct siginfo info;
if (!inf->fn(addr, esr, regs))

View File

@ -294,10 +294,6 @@ static __init int setup_hugepagesz(char *opt)
hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
} else if (ps == PUD_SIZE) {
hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
} else if (ps == (PAGE_SIZE * CONT_PTES)) {
hugetlb_add_hstate(CONT_PTE_SHIFT);
} else if (ps == (PMD_SIZE * CONT_PMDS)) {
hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
} else {
hugetlb_bad_size();
pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
@ -306,13 +302,3 @@ static __init int setup_hugepagesz(char *opt)
return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
#ifdef CONFIG_ARM64_64K_PAGES
static __init int add_default_hugepagesz(void)
{
if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
hugetlb_add_hstate(CONT_PTE_SHIFT);
return 0;
}
arch_initcall(add_default_hugepagesz);
#endif

View File

@ -0,0 +1,29 @@
#ifndef _ASM_IA64_ASM_PROTOTYPES_H
#define _ASM_IA64_ASM_PROTOTYPES_H
#include <asm/cacheflush.h>
#include <asm/checksum.h>
#include <asm/esi.h>
#include <asm/ftrace.h>
#include <asm/page.h>
#include <asm/pal.h>
#include <asm/string.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>
#include <asm/xor.h>
extern const char ia64_ivt[];
signed int __divsi3(signed int, unsigned int);
signed int __modsi3(signed int, unsigned int);
signed long long __divdi3(signed long long, unsigned long long);
signed long long __moddi3(signed long long, unsigned long long);
unsigned int __udivsi3(unsigned int, unsigned int);
unsigned int __umodsi3(unsigned int, unsigned int);
unsigned long long __udivdi3(unsigned long long, unsigned long long);
unsigned long long __umoddi3(unsigned long long, unsigned long long);
#endif /* _ASM_IA64_ASM_PROTOTYPES_H */

View File

@ -24,25 +24,25 @@ AFLAGS___modsi3.o = -DMODULO
AFLAGS___umodsi3.o = -DUNSIGNED -DMODULO
$(obj)/__divdi3.o: $(src)/idiv64.S FORCE
$(call if_changed_dep,as_o_S)
$(call if_changed_rule,as_o_S)
$(obj)/__udivdi3.o: $(src)/idiv64.S FORCE
$(call if_changed_dep,as_o_S)
$(call if_changed_rule,as_o_S)
$(obj)/__moddi3.o: $(src)/idiv64.S FORCE
$(call if_changed_dep,as_o_S)
$(call if_changed_rule,as_o_S)
$(obj)/__umoddi3.o: $(src)/idiv64.S FORCE
$(call if_changed_dep,as_o_S)
$(call if_changed_rule,as_o_S)
$(obj)/__divsi3.o: $(src)/idiv32.S FORCE
$(call if_changed_dep,as_o_S)
$(call if_changed_rule,as_o_S)
$(obj)/__udivsi3.o: $(src)/idiv32.S FORCE
$(call if_changed_dep,as_o_S)
$(call if_changed_rule,as_o_S)
$(obj)/__modsi3.o: $(src)/idiv32.S FORCE
$(call if_changed_dep,as_o_S)
$(call if_changed_rule,as_o_S)
$(obj)/__umodsi3.o: $(src)/idiv32.S FORCE
$(call if_changed_dep,as_o_S)
$(call if_changed_rule,as_o_S)

View File

@ -197,20 +197,21 @@ extern long __must_check strnlen_user(const char __user *src, long count);
#define strlen_user(str) strnlen_user(str, 32767)
extern unsigned long __must_check __copy_user_zeroing(void *to,
const void __user *from,
unsigned long n);
extern unsigned long raw_copy_from_user(void *to, const void __user *from,
unsigned long n);
static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res = n;
if (likely(access_ok(VERIFY_READ, from, n)))
return __copy_user_zeroing(to, from, n);
memset(to, 0, n);
return n;
res = raw_copy_from_user(to, from, n);
if (unlikely(res))
memset(to + (n - res), 0, res);
return res;
}
#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n)
#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
#define __copy_from_user_inatomic __copy_from_user
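
A quick numeric check of the zeroing logic above (illustrative values):
if n == 100 and raw_copy_from_user() returns res == 40, then bytes
[0, 60) of the destination were copied, and memset(to + 60, 0, 40)
clears the tail, preserving copy_from_user()'s contract of zeroing
whatever could not be copied.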
extern unsigned long __must_check __copy_user(void __user *to,

View File

@ -29,7 +29,6 @@
COPY \
"1:\n" \
" .section .fixup,\"ax\"\n" \
" MOV D1Ar1,#0\n" \
FIXUP \
" MOVT D1Ar1,#HI(1b)\n" \
" JUMP D1Ar1,#LO(1b)\n" \
@ -260,27 +259,31 @@
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"22:\n" \
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"SUB %3, %3, #32\n" \
"23:\n" \
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"SUB %3, %3, #32\n" \
"24:\n" \
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"25:\n" \
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"26:\n" \
"SUB %3, %3, #32\n" \
"DCACHE [%1+#-64], D0Ar6\n" \
"BR $Lloop"id"\n" \
\
"MOV RAPF, %1\n" \
"25:\n" \
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"26:\n" \
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"SUB %3, %3, #32\n" \
"27:\n" \
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"28:\n" \
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"SUB %0, %0, #8\n" \
"29:\n" \
"SUB %3, %3, #32\n" \
"30:\n" \
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"31:\n" \
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"32:\n" \
"SUB %0, %0, #8\n" \
"33:\n" \
"SETL [%0++], D0.7, D1.7\n" \
"SUB %3, %3, #32\n" \
"1:" \
@ -312,11 +315,15 @@
" .long 26b,3b\n" \
" .long 27b,3b\n" \
" .long 28b,3b\n" \
" .long 29b,4b\n" \
" .long 29b,3b\n" \
" .long 30b,3b\n" \
" .long 31b,3b\n" \
" .long 32b,3b\n" \
" .long 33b,4b\n" \
" .previous\n" \
: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
: "0" (to), "1" (from), "2" (ret), "3" (n) \
: "D1Ar1", "D0Ar2", "memory")
: "D1Ar1", "D0Ar2", "cc", "memory")
/* rewind 'to' and 'from' pointers when a fault occurs
*
@ -342,7 +349,7 @@
#define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
"LSR D0Ar2, D0Ar2, #8\n" \
"AND D0Ar2, D0Ar2, #0x7\n" \
"ANDS D0Ar2, D0Ar2, #0x7\n" \
"ADDZ D0Ar2, D0Ar2, #4\n" \
"SUB D0Ar2, D0Ar2, #1\n" \
"MOV D1Ar1, #4\n" \
@ -403,47 +410,55 @@
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"22:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"SUB %3, %3, #16\n" \
"23:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"24:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"SUB %3, %3, #16\n" \
"25:\n" \
"24:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"26:\n" \
"25:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"26:\n" \
"SUB %3, %3, #16\n" \
"27:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"28:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"29:\n" \
"SUB %3, %3, #16\n" \
"30:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"31:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"32:\n" \
"SUB %3, %3, #16\n" \
"DCACHE [%1+#-64], D0Ar6\n" \
"BR $Lloop"id"\n" \
\
"MOV RAPF, %1\n" \
"29:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"30:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"SUB %3, %3, #16\n" \
"31:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"32:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"SUB %3, %3, #16\n" \
"33:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"34:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"SUB %3, %3, #16\n" \
"35:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"SUB %3, %3, #16\n" \
"36:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"SUB %0, %0, #4\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"37:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"38:\n" \
"SUB %3, %3, #16\n" \
"39:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"40:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"41:\n" \
"SUB %3, %3, #16\n" \
"42:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"43:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"44:\n" \
"SUB %0, %0, #4\n" \
"45:\n" \
"SETD [%0++], D0.7\n" \
"SUB %3, %3, #16\n" \
"1:" \
@ -483,11 +498,19 @@
" .long 34b,3b\n" \
" .long 35b,3b\n" \
" .long 36b,3b\n" \
" .long 37b,4b\n" \
" .long 37b,3b\n" \
" .long 38b,3b\n" \
" .long 39b,3b\n" \
" .long 40b,3b\n" \
" .long 41b,3b\n" \
" .long 42b,3b\n" \
" .long 43b,3b\n" \
" .long 44b,3b\n" \
" .long 45b,4b\n" \
" .previous\n" \
: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
: "0" (to), "1" (from), "2" (ret), "3" (n) \
: "D1Ar1", "D0Ar2", "memory")
: "D1Ar1", "D0Ar2", "cc", "memory")
/* rewind 'to' and 'from' pointers when a fault occurs
*
@ -513,7 +536,7 @@
#define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
"LSR D0Ar2, D0Ar2, #8\n" \
"AND D0Ar2, D0Ar2, #0x7\n" \
"ANDS D0Ar2, D0Ar2, #0x7\n" \
"ADDZ D0Ar2, D0Ar2, #4\n" \
"SUB D0Ar2, D0Ar2, #1\n" \
"MOV D1Ar1, #4\n" \
@ -538,23 +561,31 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
if ((unsigned long) src & 1) {
__asm_copy_to_user_1(dst, src, retn);
n--;
if (retn)
return retn + n;
}
if ((unsigned long) dst & 1) {
/* Worst case - byte copy */
while (n > 0) {
__asm_copy_to_user_1(dst, src, retn);
n--;
if (retn)
return retn + n;
}
}
if (((unsigned long) src & 2) && n >= 2) {
__asm_copy_to_user_2(dst, src, retn);
n -= 2;
if (retn)
return retn + n;
}
if ((unsigned long) dst & 2) {
/* Second worst case - word copy */
while (n >= 2) {
__asm_copy_to_user_2(dst, src, retn);
n -= 2;
if (retn)
return retn + n;
}
}
@ -569,6 +600,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
while (n >= 8) {
__asm_copy_to_user_8x64(dst, src, retn);
n -= 8;
if (retn)
return retn + n;
}
}
if (n >= RAPF_MIN_BUF_SIZE) {
@ -581,6 +614,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
while (n >= 8) {
__asm_copy_to_user_8x64(dst, src, retn);
n -= 8;
if (retn)
return retn + n;
}
}
#endif
@ -588,11 +623,15 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
while (n >= 16) {
__asm_copy_to_user_16(dst, src, retn);
n -= 16;
if (retn)
return retn + n;
}
while (n >= 4) {
__asm_copy_to_user_4(dst, src, retn);
n -= 4;
if (retn)
return retn + n;
}
switch (n) {
@ -609,6 +648,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
break;
}
/*
* If we get here, retn correctly reflects the number of failing
* bytes.
*/
return retn;
}
EXPORT_SYMBOL(__copy_user);
@ -617,16 +660,14 @@ EXPORT_SYMBOL(__copy_user);
__asm_copy_user_cont(to, from, ret, \
" GETB D1Ar1,[%1++]\n" \
"2: SETB [%0++],D1Ar1\n", \
"3: ADD %2,%2,#1\n" \
" SETB [%0++],D1Ar1\n", \
"3: ADD %2,%2,#1\n", \
" .long 2b,3b\n")
#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \
" GETW D1Ar1,[%1++]\n" \
"2: SETW [%0++],D1Ar1\n" COPY, \
"3: ADD %2,%2,#2\n" \
" SETW [%0++],D1Ar1\n" FIXUP, \
"3: ADD %2,%2,#2\n" FIXUP, \
" .long 2b,3b\n" TENTRY)
#define __asm_copy_from_user_2(to, from, ret) \
@ -636,145 +677,26 @@ EXPORT_SYMBOL(__copy_user);
__asm_copy_from_user_2x_cont(to, from, ret, \
" GETB D1Ar1,[%1++]\n" \
"4: SETB [%0++],D1Ar1\n", \
"5: ADD %2,%2,#1\n" \
" SETB [%0++],D1Ar1\n", \
"5: ADD %2,%2,#1\n", \
" .long 4b,5b\n")
#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \
" GETD D1Ar1,[%1++]\n" \
"2: SETD [%0++],D1Ar1\n" COPY, \
"3: ADD %2,%2,#4\n" \
" SETD [%0++],D1Ar1\n" FIXUP, \
"3: ADD %2,%2,#4\n" FIXUP, \
" .long 2b,3b\n" TENTRY)
#define __asm_copy_from_user_4(to, from, ret) \
__asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
#define __asm_copy_from_user_5(to, from, ret) \
__asm_copy_from_user_4x_cont(to, from, ret, \
" GETB D1Ar1,[%1++]\n" \
"4: SETB [%0++],D1Ar1\n", \
"5: ADD %2,%2,#1\n" \
" SETB [%0++],D1Ar1\n", \
" .long 4b,5b\n")
#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_from_user_4x_cont(to, from, ret, \
" GETW D1Ar1,[%1++]\n" \
"4: SETW [%0++],D1Ar1\n" COPY, \
"5: ADD %2,%2,#2\n" \
" SETW [%0++],D1Ar1\n" FIXUP, \
" .long 4b,5b\n" TENTRY)
#define __asm_copy_from_user_6(to, from, ret) \
__asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
#define __asm_copy_from_user_7(to, from, ret) \
__asm_copy_from_user_6x_cont(to, from, ret, \
" GETB D1Ar1,[%1++]\n" \
"6: SETB [%0++],D1Ar1\n", \
"7: ADD %2,%2,#1\n" \
" SETB [%0++],D1Ar1\n", \
" .long 6b,7b\n")
#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_from_user_4x_cont(to, from, ret, \
" GETD D1Ar1,[%1++]\n" \
"4: SETD [%0++],D1Ar1\n" COPY, \
"5: ADD %2,%2,#4\n" \
" SETD [%0++],D1Ar1\n" FIXUP, \
" .long 4b,5b\n" TENTRY)
#define __asm_copy_from_user_8(to, from, ret) \
__asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
#define __asm_copy_from_user_9(to, from, ret) \
__asm_copy_from_user_8x_cont(to, from, ret, \
" GETB D1Ar1,[%1++]\n" \
"6: SETB [%0++],D1Ar1\n", \
"7: ADD %2,%2,#1\n" \
" SETB [%0++],D1Ar1\n", \
" .long 6b,7b\n")
#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_from_user_8x_cont(to, from, ret, \
" GETW D1Ar1,[%1++]\n" \
"6: SETW [%0++],D1Ar1\n" COPY, \
"7: ADD %2,%2,#2\n" \
" SETW [%0++],D1Ar1\n" FIXUP, \
" .long 6b,7b\n" TENTRY)
#define __asm_copy_from_user_10(to, from, ret) \
__asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
#define __asm_copy_from_user_11(to, from, ret) \
__asm_copy_from_user_10x_cont(to, from, ret, \
" GETB D1Ar1,[%1++]\n" \
"8: SETB [%0++],D1Ar1\n", \
"9: ADD %2,%2,#1\n" \
" SETB [%0++],D1Ar1\n", \
" .long 8b,9b\n")
#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_from_user_8x_cont(to, from, ret, \
" GETD D1Ar1,[%1++]\n" \
"6: SETD [%0++],D1Ar1\n" COPY, \
"7: ADD %2,%2,#4\n" \
" SETD [%0++],D1Ar1\n" FIXUP, \
" .long 6b,7b\n" TENTRY)
#define __asm_copy_from_user_12(to, from, ret) \
__asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
#define __asm_copy_from_user_13(to, from, ret) \
__asm_copy_from_user_12x_cont(to, from, ret, \
" GETB D1Ar1,[%1++]\n" \
"8: SETB [%0++],D1Ar1\n", \
"9: ADD %2,%2,#1\n" \
" SETB [%0++],D1Ar1\n", \
" .long 8b,9b\n")
#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_from_user_12x_cont(to, from, ret, \
" GETW D1Ar1,[%1++]\n" \
"8: SETW [%0++],D1Ar1\n" COPY, \
"9: ADD %2,%2,#2\n" \
" SETW [%0++],D1Ar1\n" FIXUP, \
" .long 8b,9b\n" TENTRY)
#define __asm_copy_from_user_14(to, from, ret) \
__asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
#define __asm_copy_from_user_15(to, from, ret) \
__asm_copy_from_user_14x_cont(to, from, ret, \
" GETB D1Ar1,[%1++]\n" \
"10: SETB [%0++],D1Ar1\n", \
"11: ADD %2,%2,#1\n" \
" SETB [%0++],D1Ar1\n", \
" .long 10b,11b\n")
#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_from_user_12x_cont(to, from, ret, \
" GETD D1Ar1,[%1++]\n" \
"8: SETD [%0++],D1Ar1\n" COPY, \
"9: ADD %2,%2,#4\n" \
" SETD [%0++],D1Ar1\n" FIXUP, \
" .long 8b,9b\n" TENTRY)
#define __asm_copy_from_user_16(to, from, ret) \
__asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
#define __asm_copy_from_user_8x64(to, from, ret) \
asm volatile ( \
" GETL D0Ar2,D1Ar1,[%1++]\n" \
"2: SETL [%0++],D0Ar2,D1Ar1\n" \
"1:\n" \
" .section .fixup,\"ax\"\n" \
" MOV D1Ar1,#0\n" \
" MOV D0Ar2,#0\n" \
"3: ADD %2,%2,#8\n" \
" SETL [%0++],D0Ar2,D1Ar1\n" \
" MOVT D0Ar2,#HI(1b)\n" \
" JUMP D0Ar2,#LO(1b)\n" \
" .previous\n" \
@ -789,36 +711,57 @@ EXPORT_SYMBOL(__copy_user);
*
* Rationale:
* A fault occurs while reading from the user buffer, which is the
* source. Since the fault is at a single address, we only
* need to rewind by 8 bytes.
* source.
* Since we don't write to kernel buffer until we read first,
* the kernel buffer is at the right state and needn't be
* corrected.
* corrected, but the source must be rewound to the beginning of
* the block, which is LSM_STEP*8 bytes.
* LSM_STEP is bits 10:8 in TXSTATUS which is already read
* and stored in D0Ar2
*
* NOTE: If a fault occurs at the last operation in M{G,S}ETL,
* LSM_STEP will be 0; i.e. we do 4 writes in our case, so if
* a fault happens at the 4th write, LSM_STEP will be 0
* instead of 4. The code copes with that.
*/
#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \
__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
"SUB %1, %1, #8\n")
"LSR D0Ar2, D0Ar2, #5\n" \
"ANDS D0Ar2, D0Ar2, #0x38\n" \
"ADDZ D0Ar2, D0Ar2, #32\n" \
"SUB %1, %1, D0Ar2\n")
/* rewind 'from' pointer when a fault occurs
*
* Rationale:
* A fault occurs while reading from the user buffer, which is the
* source. Since the fault is at a single address, we only
* need to rewind by 4 bytes.
* source.
* Since we don't write to kernel buffer until we read first,
* the kernel buffer is at the right state and needn't be
* corrected.
* corrected, but the source must be rewound to the beginning of
* the block, which is LSM_STEP*4 bytes.
* LSM_STEP is bits 10:8 in TXSTATUS which is already read
* and stored in D0Ar2
*
* NOTE: If a fault occurs at the last operation in M{G,S}ETL,
* LSM_STEP will be 0; i.e. we do 4 writes in our case, so if
* a fault happens at the 4th write, LSM_STEP will be 0
* instead of 4. The code copes with that.
*/
#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \
__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
"SUB %1, %1, #4\n")
"LSR D0Ar2, D0Ar2, #6\n" \
"ANDS D0Ar2, D0Ar2, #0x1c\n" \
"ADDZ D0Ar2, D0Ar2, #16\n" \
"SUB %1, %1, D0Ar2\n")
/* Copy from user to kernel, zeroing the bytes that were inaccessible in
userland. The return-value is the number of bytes that were
inaccessible. */
unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
unsigned long n)
/*
* Copy from user to kernel. The return-value is the number of bytes that were
* inaccessible.
*/
unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
unsigned long n)
{
register char *dst asm ("A0.2") = pdst;
register const char __user *src asm ("A1.2") = psrc;
@ -830,6 +773,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
if ((unsigned long) src & 1) {
__asm_copy_from_user_1(dst, src, retn);
n--;
if (retn)
return retn + n;
}
if ((unsigned long) dst & 1) {
/* Worst case - byte copy */
@ -837,12 +782,14 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
__asm_copy_from_user_1(dst, src, retn);
n--;
if (retn)
goto copy_exception_bytes;
return retn + n;
}
}
if (((unsigned long) src & 2) && n >= 2) {
__asm_copy_from_user_2(dst, src, retn);
n -= 2;
if (retn)
return retn + n;
}
if ((unsigned long) dst & 2) {
/* Second worst case - word copy */
@ -850,16 +797,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
__asm_copy_from_user_2(dst, src, retn);
n -= 2;
if (retn)
goto copy_exception_bytes;
return retn + n;
}
}
/* We only need one check after the unalignment-adjustments,
because if both adjustments were done, either both or
neither reference had an exception. */
if (retn != 0)
goto copy_exception_bytes;
#ifdef USE_RAPF
/* 64 bit copy loop */
if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
@ -872,7 +813,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
__asm_copy_from_user_8x64(dst, src, retn);
n -= 8;
if (retn)
goto copy_exception_bytes;
return retn + n;
}
}
@ -888,7 +829,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
__asm_copy_from_user_8x64(dst, src, retn);
n -= 8;
if (retn)
goto copy_exception_bytes;
return retn + n;
}
}
#endif
@ -898,7 +839,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
n -= 4;
if (retn)
goto copy_exception_bytes;
return retn + n;
}
/* If we get here, there were no memory read faults. */
@ -924,21 +865,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
/* If we get here, retn correctly reflects the number of failing
bytes. */
return retn;
copy_exception_bytes:
/* We already have "retn" bytes cleared, and need to clear the
remaining "n" bytes. A non-optimized simple byte-for-byte in-line
memset is preferred here, since this isn't speed-critical code and
we'd rather have this a leaf-function than calling memset. */
{
char *endp;
for (endp = dst + n; dst < endp; dst++)
*dst = 0;
}
return retn + n;
}
EXPORT_SYMBOL(__copy_user_zeroing);
EXPORT_SYMBOL(raw_copy_from_user);
#define __asm_clear_8x64(to, ret) \
asm volatile ( \

View File

@ -1531,7 +1531,7 @@ config CPU_MIPS64_R6
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_MSA
select GENERIC_CSUM
select MIPS_O32_FP64_SUPPORT if MIPS32_O32
select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32
select HAVE_KVM
help
Choose this option to build a kernel for release 6 or later of the

View File

@ -489,7 +489,7 @@ $(generic_defconfigs):
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh \
-m -O $(objtree) $(srctree)/arch/$(ARCH)/configs/generic_defconfig $^ \
$(foreach board,$(BOARDS),$(generic_config_dir)/board-$(board).config)
$(Q)$(MAKE) olddefconfig
$(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig
#
# Prevent generic merge_config rules attempting to merge single fragments
@ -503,8 +503,8 @@ $(generic_config_dir)/%.config: ;
#
.PHONY: sead3_defconfig
sead3_defconfig:
$(Q)$(MAKE) 32r2el_defconfig BOARDS=sead-3
$(Q)$(MAKE) -f $(srctree)/Makefile 32r2el_defconfig BOARDS=sead-3
.PHONY: sead3micro_defconfig
sead3micro_defconfig:
$(Q)$(MAKE) micro32r2el_defconfig BOARDS=sead-3
$(Q)$(MAKE) -f $(srctree)/Makefile micro32r2el_defconfig BOARDS=sead-3


@@ -3,3 +3,4 @@
#include <asm/fpu.h>
#include <asm-generic/asm-prototypes.h>
#include <asm/uaccess.h>
#include <asm/ftrace.h>


@@ -21,6 +21,7 @@
#include <asm/cpu-features.h>
#include <asm/fpu_emulator.h>
#include <asm/hazards.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/current.h>
#include <asm/msa.h>


@@ -18,9 +18,24 @@
#include <irq.h>
#define IRQ_STACK_SIZE THREAD_SIZE
#define IRQ_STACK_START (IRQ_STACK_SIZE - sizeof(unsigned long))
extern void *irq_stack[NR_CPUS];
/*
* The highest address on the IRQ stack contains a dummy frame put down in
* genex.S (handle_int & except_vec_vi_handler) which is structured as follows:
*
* top ------------
* | task sp | <- irq_stack[cpu] + IRQ_STACK_START
* ------------
* | | <- First frame of IRQ context
* ------------
*
* task sp holds a copy of the task stack pointer where the struct pt_regs
* from exception entry can be found.
*/
static inline bool on_irq_stack(int cpu, unsigned long sp)
{
unsigned long low = (unsigned long)irq_stack[cpu];
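A short illustrative sketch (not from this patch) of consuming the dummy frame described above: the word at irq_stack[cpu] + IRQ_STACK_START holds the interrupted task's stack pointer, which an unwinder can follow to the saved struct pt_regs. The helper name is invented.

/* Sketch: read the task sp stored at the top of the IRQ stack by
 * handle_int/except_vec_vi_handler (see the layout comment above).
 */
static inline unsigned long irq_stack_task_sp(int cpu)
{
	unsigned long top = (unsigned long)irq_stack[cpu] + IRQ_STACK_START;

	return *(unsigned long *)top;	/* sp of the interrupted task */
}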


@@ -127,7 +127,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
" andi %[ticket], %[ticket], 0xffff \n"
" bne %[ticket], %[my_ticket], 4f \n"
" subu %[ticket], %[my_ticket], %[ticket] \n"
"2: \n"
"2: .insn \n"
" .subsection 2 \n"
"4: andi %[ticket], %[ticket], 0xffff \n"
" sll %[ticket], 5 \n"
@@ -202,7 +202,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
" sc %[ticket], %[ticket_ptr] \n"
" beqz %[ticket], 1b \n"
" li %[ticket], 1 \n"
"2: \n"
"2: .insn \n"
" .subsection 2 \n"
"3: b 2b \n"
" li %[ticket], 0 \n"
@@ -382,7 +382,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
" .set reorder \n"
__WEAK_LLSC_MB
" li %2, 1 \n"
"2: \n"
"2: .insn \n"
: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
: GCC_OFF_SMALL_ASM() (rw->lock)
: "memory");
@@ -422,7 +422,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
" lui %1, 0x8000 \n"
" sc %1, %0 \n"
" li %2, 1 \n"
"2: \n"
"2: .insn \n"
: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
"=&r" (ret)
: GCC_OFF_SMALL_ASM() (rw->lock)


@@ -386,17 +386,18 @@
#define __NR_pkey_mprotect (__NR_Linux + 363)
#define __NR_pkey_alloc (__NR_Linux + 364)
#define __NR_pkey_free (__NR_Linux + 365)
#define __NR_statx (__NR_Linux + 366)
/*
* Offset of the last Linux o32 flavoured syscall
*/
#define __NR_Linux_syscalls 365
#define __NR_Linux_syscalls 366
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
#define __NR_O32_Linux_syscalls 365
#define __NR_O32_Linux_syscalls 366
#if _MIPS_SIM == _MIPS_SIM_ABI64
@@ -730,16 +731,17 @@
#define __NR_pkey_mprotect (__NR_Linux + 323)
#define __NR_pkey_alloc (__NR_Linux + 324)
#define __NR_pkey_free (__NR_Linux + 325)
#define __NR_statx (__NR_Linux + 326)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
#define __NR_Linux_syscalls 325
#define __NR_Linux_syscalls 326
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
#define __NR_64_Linux_syscalls 325
#define __NR_64_Linux_syscalls 326
#if _MIPS_SIM == _MIPS_SIM_NABI32
@@ -1077,15 +1079,16 @@
#define __NR_pkey_mprotect (__NR_Linux + 327)
#define __NR_pkey_alloc (__NR_Linux + 328)
#define __NR_pkey_free (__NR_Linux + 329)
#define __NR_statx (__NR_Linux + 330)
/*
* Offset of the last N32 flavoured syscall
*/
#define __NR_Linux_syscalls 329
#define __NR_Linux_syscalls 330
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
#define __NR_N32_Linux_syscalls 329
#define __NR_N32_Linux_syscalls 330
#endif /* _UAPI_ASM_UNISTD_H */


@@ -102,6 +102,7 @@ void output_thread_info_defines(void)
DEFINE(_THREAD_SIZE, THREAD_SIZE);
DEFINE(_THREAD_MASK, THREAD_MASK);
DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
DEFINE(_IRQ_STACK_START, IRQ_STACK_START);
BLANK();
}


@@ -80,7 +80,7 @@ static unsigned int calculate_min_delta(void)
}
/* Sorted insert of 75th percentile into buf2 */
for (k = 0; k < i; ++k) {
for (k = 0; k < i && k < ARRAY_SIZE(buf2); ++k) {
if (buf1[ARRAY_SIZE(buf1) - 1] < buf2[k]) {
l = min_t(unsigned int,
i, ARRAY_SIZE(buf2) - 1);


@@ -361,7 +361,7 @@ LEAF(mips_cps_get_bootcfg)
END(mips_cps_get_bootcfg)
LEAF(mips_cps_boot_vpes)
PTR_L ta2, COREBOOTCFG_VPEMASK(a0)
lw ta2, COREBOOTCFG_VPEMASK(a0)
PTR_L ta3, COREBOOTCFG_VPECONFIG(a0)
#if defined(CONFIG_CPU_MIPSR6)


@@ -1824,7 +1824,7 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
}
decode_configs(c);
c->options |= MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
c->writecombine = _CACHE_UNCACHED_ACCELERATED;
break;
default:


@@ -257,7 +257,7 @@ int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr,
else if ((prog_req.fr1 && prog_req.frdefault) ||
(prog_req.single && !prog_req.frdefault))
/* Make sure 64-bit MIPS III/IV/64R1 will not pick FR1 */
state->overall_fp_mode = ((current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
state->overall_fp_mode = ((raw_current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
cpu_has_mips_r2_r6) ?
FP_FR1 : FP_FR0;
else if (prog_req.fr1)


@@ -215,9 +215,11 @@ NESTED(handle_int, PT_SIZE, sp)
beq t0, t1, 2f
/* Switch to IRQ stack */
li t1, _IRQ_STACK_SIZE
li t1, _IRQ_STACK_START
PTR_ADD sp, t0, t1
/* Save task's sp on IRQ stack so that unwinding can follow it */
LONG_S s1, 0(sp)
2:
jal plat_irq_dispatch
@@ -325,9 +327,11 @@ NESTED(except_vec_vi_handler, 0, sp)
beq t0, t1, 2f
/* Switch to IRQ stack */
li t1, _IRQ_STACK_SIZE
li t1, _IRQ_STACK_START
PTR_ADD sp, t0, t1
/* Save task's sp on IRQ stack so that unwinding can follow it */
LONG_S s1, 0(sp)
2:
jalr v0
@@ -519,7 +523,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
BUILD_HANDLER reserved reserved sti verbose /* others */
.align 5
LEAF(handle_ri_rdhwr_vivt)
LEAF(handle_ri_rdhwr_tlbp)
.set push
.set noat
.set noreorder
@@ -538,7 +542,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
.set pop
bltz k1, handle_ri /* slow path */
/* fall thru */
END(handle_ri_rdhwr_vivt)
END(handle_ri_rdhwr_tlbp)
LEAF(handle_ri_rdhwr)
.set push


@@ -244,9 +244,6 @@ static int compute_signal(int tt)
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
int reg;
struct thread_info *ti = task_thread_info(p);
unsigned long ksp = (unsigned long)ti + THREAD_SIZE - 32;
struct pt_regs *regs = (struct pt_regs *)ksp - 1;
#if (KGDB_GDB_REG_SIZE == 32)
u32 *ptr = (u32 *)gdb_regs;
#else
@@ -254,25 +251,46 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
#endif
for (reg = 0; reg < 16; reg++)
*(ptr++) = regs->regs[reg];
*(ptr++) = 0;
/* S0 - S7 */
for (reg = 16; reg < 24; reg++)
*(ptr++) = regs->regs[reg];
*(ptr++) = p->thread.reg16;
*(ptr++) = p->thread.reg17;
*(ptr++) = p->thread.reg18;
*(ptr++) = p->thread.reg19;
*(ptr++) = p->thread.reg20;
*(ptr++) = p->thread.reg21;
*(ptr++) = p->thread.reg22;
*(ptr++) = p->thread.reg23;
for (reg = 24; reg < 28; reg++)
*(ptr++) = 0;
/* GP, SP, FP, RA */
for (reg = 28; reg < 32; reg++)
*(ptr++) = regs->regs[reg];
*(ptr++) = (long)p;
*(ptr++) = p->thread.reg29;
*(ptr++) = p->thread.reg30;
*(ptr++) = p->thread.reg31;
*(ptr++) = regs->cp0_status;
*(ptr++) = regs->lo;
*(ptr++) = regs->hi;
*(ptr++) = regs->cp0_badvaddr;
*(ptr++) = regs->cp0_cause;
*(ptr++) = regs->cp0_epc;
*(ptr++) = p->thread.cp0_status;
/* lo, hi */
*(ptr++) = 0;
*(ptr++) = 0;
/*
* BadVAddr, Cause
* Ideally these would come from the last exception frame up the stack
* but that requires unwinding; without it we can't know much for sure.
*/
*(ptr++) = 0;
*(ptr++) = 0;
/*
* PC
* use return address (RA), i.e. the moment after return from resume()
*/
*(ptr++) = p->thread.reg31;
}
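A summary sketch (editorial, not patch content) of where each GDB register now comes from for a sleeping task, read off the code above:

/*
 * s0-s7                  <- p->thread.reg16..reg23 (callee-saved)
 * gp                     <- the task pointer; sp/fp/ra <- p->thread.reg29..reg31
 * cp0_status             <- p->thread.cp0_status
 * pc                     <- p->thread.reg31, i.e. resume()'s return point
 * lo/hi, BadVAddr, Cause <- 0: unknowable without unwinding the stack
 */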
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)


@@ -1446,6 +1446,11 @@ static int mipsxx_pmu_handle_shared_irq(void)
HANDLE_COUNTER(0)
}
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
read_unlock(&pmuint_rwlock);
#endif
resume_local_counters();
/*
* Do all the work for the pending perf events. We can do this
* in here because the performance counter interrupt is a regular
@@ -1454,10 +1459,6 @@ static int mipsxx_pmu_handle_shared_irq(void)
if (handled == IRQ_HANDLED)
irq_work_run();
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
read_unlock(&pmuint_rwlock);
#endif
resume_local_counters();
return handled;
}


@@ -488,31 +488,52 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
unsigned long pc,
unsigned long *ra)
{
unsigned long low, high, irq_stack_high;
struct mips_frame_info info;
unsigned long size, ofs;
struct pt_regs *regs;
int leaf;
extern void ret_from_irq(void);
extern void ret_from_exception(void);
if (!stack_page)
return 0;
/*
* If we reached the bottom of interrupt context,
* return saved pc in pt_regs.
* IRQ stacks start at IRQ_STACK_START
* task stacks at THREAD_SIZE - 32
*/
if (pc == (unsigned long)ret_from_irq ||
pc == (unsigned long)ret_from_exception) {
struct pt_regs *regs;
if (*sp >= stack_page &&
*sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
regs = (struct pt_regs *)*sp;
pc = regs->cp0_epc;
if (!user_mode(regs) && __kernel_text_address(pc)) {
*sp = regs->regs[29];
*ra = regs->regs[31];
return pc;
}
low = stack_page;
if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
high = stack_page + IRQ_STACK_START;
irq_stack_high = high;
} else {
high = stack_page + THREAD_SIZE - 32;
irq_stack_high = 0;
}
/*
* If we reached the top of the interrupt stack, start unwinding
* the interrupted task stack.
*/
if (unlikely(*sp == irq_stack_high)) {
unsigned long task_sp = *(unsigned long *)*sp;
/*
* Check that the pointer saved in the IRQ stack head points to
* something within the stack of the current task
*/
if (!object_is_on_stack((void *)task_sp))
return 0;
/*
* Follow the pointer to the task's kernel stack frame where the
* interrupted state was saved.
*/
regs = (struct pt_regs *)task_sp;
pc = regs->cp0_epc;
if (!user_mode(regs) && __kernel_text_address(pc)) {
*sp = regs->regs[29];
*ra = regs->regs[31];
return pc;
}
return 0;
}
@@ -533,8 +554,7 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
if (leaf < 0)
return 0;
if (*sp < stack_page ||
*sp + info.frame_size > stack_page + THREAD_SIZE - 32)
if (*sp < low || *sp + info.frame_size > high)
return 0;
if (leaf)


@@ -18,7 +18,7 @@
#include <linux/kernel.h>
#include <linux/libfdt.h>
#include <linux/of_fdt.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/start_kernel.h>
#include <linux/string.h>
#include <linux/printk.h>


@@ -600,3 +600,4 @@ EXPORT(sys_call_table)
PTR sys_pkey_mprotect
PTR sys_pkey_alloc
PTR sys_pkey_free /* 4365 */
PTR sys_statx


@@ -438,4 +438,5 @@ EXPORT(sys_call_table)
PTR sys_pkey_mprotect
PTR sys_pkey_alloc
PTR sys_pkey_free /* 5325 */
PTR sys_statx
.size sys_call_table,.-sys_call_table


@@ -433,4 +433,5 @@ EXPORT(sysn32_call_table)
PTR sys_pkey_mprotect
PTR sys_pkey_alloc
PTR sys_pkey_free
PTR sys_statx /* 6330 */
.size sysn32_call_table,.-sysn32_call_table


@@ -588,4 +588,5 @@ EXPORT(sys32_call_table)
PTR sys_pkey_mprotect
PTR sys_pkey_alloc
PTR sys_pkey_free /* 4365 */
PTR sys_statx
.size sys32_call_table,.-sys32_call_table


@@ -422,13 +422,12 @@ void play_dead(void)
local_irq_disable();
idle_task_exit();
cpu = smp_processor_id();
core = cpu_data[cpu].core;
cpu_death = CPU_DEATH_POWER;
pr_debug("CPU%d going offline\n", cpu);
if (cpu_has_mipsmt || cpu_has_vp) {
core = cpu_data[cpu].core;
/* Look for another online VPE within the core */
for_each_online_cpu(cpu_death_sibling) {
if (cpu_data[cpu_death_sibling].core != core)


@@ -83,7 +83,7 @@ extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_vivt(void);
extern asmlinkage void handle_ri_rdhwr_tlbp(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
@@ -2408,9 +2408,18 @@ void __init trap_init(void)
set_except_vector(EXCCODE_SYS, handle_sys);
set_except_vector(EXCCODE_BP, handle_bp);
set_except_vector(EXCCODE_RI, rdhwr_noopt ? handle_ri :
(cpu_has_vtag_icache ?
handle_ri_rdhwr_vivt : handle_ri_rdhwr));
if (rdhwr_noopt)
set_except_vector(EXCCODE_RI, handle_ri);
else {
if (cpu_has_vtag_icache)
set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
else if (current_cpu_type() == CPU_LOONGSON3)
set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
else
set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
}
set_except_vector(EXCCODE_CPU, handle_cpu);
set_except_vector(EXCCODE_OV, handle_ov);
set_except_vector(EXCCODE_TR, handle_tr);


@@ -467,7 +467,7 @@ void __init ltq_soc_init(void)
if (!np_xbar)
panic("Failed to load xbar nodes from devicetree");
if (of_address_to_resource(np_pmu, 0, &res_xbar))
if (of_address_to_resource(np_xbar, 0, &res_xbar))
panic("Failed to get xbar resources");
if (!request_mem_region(res_xbar.start, resource_size(&res_xbar),
res_xbar.name))


@@ -1562,6 +1562,7 @@ static void probe_vcache(void)
vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz;
c->vcache.waybit = 0;
c->vcache.waysize = vcache_size / c->vcache.ways;
pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
@@ -1664,6 +1665,7 @@ static void __init loongson3_sc_init(void)
/* Loongson-3 has 4 cores, 1MB scache for each. scaches are shared */
scache_size *= 4;
c->scache.waybit = 0;
c->scache.waysize = scache_size / c->scache.ways;
pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
if (scache_size)


@@ -760,7 +760,8 @@ static void build_huge_update_entries(u32 **p, unsigned int pte,
static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
struct uasm_label **l,
unsigned int pte,
unsigned int ptr)
unsigned int ptr,
unsigned int flush)
{
#ifdef CONFIG_SMP
UASM_i_SC(p, pte, 0, ptr);
@@ -769,6 +770,22 @@ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
#else
UASM_i_SW(p, pte, 0, ptr);
#endif
if (cpu_has_ftlb && flush) {
BUG_ON(!cpu_has_tlbinv);
UASM_i_MFC0(p, ptr, C0_ENTRYHI);
uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
UASM_i_MTC0(p, ptr, C0_ENTRYHI);
build_tlb_write_entry(p, l, r, tlb_indexed);
uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
UASM_i_MTC0(p, ptr, C0_ENTRYHI);
build_huge_update_entries(p, pte, ptr);
build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
return;
}
build_huge_update_entries(p, pte, ptr);
build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
}
@@ -2199,7 +2216,7 @@ static void build_r4000_tlb_load_handler(void)
uasm_l_tlbl_goaround2(&l, p);
}
uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
#endif
uasm_l_nopage_tlbl(&l, p);
@@ -2254,7 +2271,7 @@ static void build_r4000_tlb_store_handler(void)
build_tlb_probe_entry(&p);
uasm_i_ori(&p, wr.r1, wr.r1,
_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
#endif
uasm_l_nopage_tlbs(&l, p);
@@ -2310,7 +2327,7 @@ static void build_r4000_tlb_modify_handler(void)
build_tlb_probe_entry(&p);
uasm_i_ori(&p, wr.r1, wr.r1,
_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
#endif
uasm_l_nopage_tlbm(&l, p);


@@ -232,6 +232,17 @@ void __init arch_init_irq(void)
{
int corehi_irq;
/*
* Preallocate the i8259's expected virqs here. Since irqchip_init()
* will probe the irqchips in hierarchical order, i8259 is probed last.
* If anything allocates a virq before the i8259 is probed, it will
* be given one from the i8259's expected range and consequently setup
* of the i8259 will fail.
*/
WARN(irq_alloc_descs(I8259A_IRQ_BASE, I8259A_IRQ_BASE,
16, numa_node_id()) < 0,
"Cannot reserve i8259 virqs at IRQ%d\n", I8259A_IRQ_BASE);
i8259_set_poll(mips_pcibios_iack);
irqchip_init();


@@ -190,7 +190,7 @@ void register_pci_controller(struct pci_controller *hose)
}
INIT_LIST_HEAD(&hose->list);
list_add(&hose->list, &controllers);
list_add_tail(&hose->list, &controllers);
/*
* Do not panic here but later - this might happen before console init.


@@ -35,7 +35,7 @@ static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna a", 0, 35, 3) };
static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) };
static struct rt2880_pmx_func pci_func[] = {
FUNC("pci-dev", 0, 40, 32),
FUNC("pci-host2", 1, 40, 32),
@@ -43,7 +43,7 @@ static struct rt2880_pmx_func pci_func[] = {
FUNC("pci-fnc", 3, 40, 32)
};
static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
static struct rt2880_pmx_func ge2_func[] = { FUNC("ge1", 0, 84, 12) };
static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };
static struct rt2880_pmx_group rt3883_pinmux_data[] = {
GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C),


@@ -48,6 +48,13 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
return alloc_bootmem_align(size, align);
}
int __init early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
bool nomap)
{
reserve_bootmem(base, size, BOOTMEM_DEFAULT);
return 0;
}
void __init early_init_devtree(void *params)
{
__be32 *dtb = (u32 *)__dtb_start;


@@ -201,6 +201,9 @@ void __init setup_arch(char **cmdline_p)
}
#endif /* CONFIG_BLK_DEV_INITRD */
early_init_fdt_reserve_self();
early_init_fdt_scan_reserved_mem();
unflatten_and_copy_device_tree();
setup_cpuinfo();


@@ -39,10 +39,10 @@
#define get_user __get_user
#if !defined(CONFIG_64BIT)
#define LDD_USER(ptr) __get_user_asm64(ptr)
#define LDD_USER(val, ptr) __get_user_asm64(val, ptr)
#define STD_USER(x, ptr) __put_user_asm64(x, ptr)
#else
#define LDD_USER(ptr) __get_user_asm("ldd", ptr)
#define LDD_USER(val, ptr) __get_user_asm(val, "ldd", ptr)
#define STD_USER(x, ptr) __put_user_asm("std", x, ptr)
#endif
@@ -64,6 +64,15 @@ struct exception_table_entry {
".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
".previous\n"
/*
* ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
* (with the lowest bit set) for which the fault handler in fixup_exception()
* will load -EFAULT into %r8 for a read or write fault, and zero the target
* register in case of a read fault in get_user().
*/
#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
/*
* The page fault handler stores, in a per-cpu area, the following information
* if a fixup routine is available.
@@ -88,92 +97,116 @@ struct exception_data {
" mtsp %0,%%sr2\n\t" \
: : "r"(get_fs()) : )
#define __get_user(x, ptr) \
({ \
register long __gu_err __asm__ ("r8") = 0; \
register long __gu_val __asm__ ("r9") = 0; \
\
load_sr2(); \
switch (sizeof(*(ptr))) { \
case 1: __get_user_asm("ldb", ptr); break; \
case 2: __get_user_asm("ldh", ptr); break; \
case 4: __get_user_asm("ldw", ptr); break; \
case 8: LDD_USER(ptr); break; \
default: BUILD_BUG(); break; \
} \
\
(x) = (__force __typeof__(*(ptr))) __gu_val; \
__gu_err; \
#define __get_user_internal(val, ptr) \
({ \
register long __gu_err __asm__ ("r8") = 0; \
\
switch (sizeof(*(ptr))) { \
case 1: __get_user_asm(val, "ldb", ptr); break; \
case 2: __get_user_asm(val, "ldh", ptr); break; \
case 4: __get_user_asm(val, "ldw", ptr); break; \
case 8: LDD_USER(val, ptr); break; \
default: BUILD_BUG(); \
} \
\
__gu_err; \
})
#define __get_user_asm(ldx, ptr) \
__asm__("\n1:\t" ldx "\t0(%%sr2,%2),%0\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
#define __get_user(val, ptr) \
({ \
load_sr2(); \
__get_user_internal(val, ptr); \
})
#define __get_user_asm(val, ldx, ptr) \
{ \
register long __gu_val; \
\
__asm__("1: " ldx " 0(%%sr2,%2),%0\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
: "=r"(__gu_val), "=r"(__gu_err) \
: "r"(ptr), "1"(__gu_err) \
: "r1");
: "r"(ptr), "1"(__gu_err)); \
\
(val) = (__force __typeof__(*(ptr))) __gu_val; \
}
#if !defined(CONFIG_64BIT)
#define __get_user_asm64(ptr) \
__asm__("\n1:\tldw 0(%%sr2,%2),%0" \
"\n2:\tldw 4(%%sr2,%2),%R0\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_2)\
ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_get_user_skip_1)\
: "=r"(__gu_val), "=r"(__gu_err) \
: "r"(ptr), "1"(__gu_err) \
: "r1");
#define __get_user_asm64(val, ptr) \
{ \
union { \
unsigned long long l; \
__typeof__(*(ptr)) t; \
} __gu_tmp; \
\
__asm__(" copy %%r0,%R0\n" \
"1: ldw 0(%%sr2,%2),%0\n" \
"2: ldw 4(%%sr2,%2),%R0\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
: "=&r"(__gu_tmp.l), "=r"(__gu_err) \
: "r"(ptr), "1"(__gu_err)); \
\
(val) = __gu_tmp.t; \
}
#endif /* !defined(CONFIG_64BIT) */
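A hedged usage sketch (not part of the patch) of the reworked __get_user(): on a fault the new fixup loads -EFAULT into %r8, the error register, and zeroes the value register, so callers only check the return code. read_user_int() is an invented name.

/* Sketch: caller-side view of the new fault behaviour. */
static int read_user_int(const int __user *uptr, int *out)
{
	int val;
	long err = __get_user(val, uptr);	/* 0 or -EFAULT */

	if (err)
		return err;	/* val was zeroed by the fixup */
	*out = val;
	return 0;
}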
#define __put_user(x, ptr) \
#define __put_user_internal(x, ptr) \
({ \
register long __pu_err __asm__ ("r8") = 0; \
__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \
\
load_sr2(); \
switch (sizeof(*(ptr))) { \
case 1: __put_user_asm("stb", __x, ptr); break; \
case 2: __put_user_asm("sth", __x, ptr); break; \
case 4: __put_user_asm("stw", __x, ptr); break; \
case 8: STD_USER(__x, ptr); break; \
default: BUILD_BUG(); break; \
} \
case 1: __put_user_asm("stb", __x, ptr); break; \
case 2: __put_user_asm("sth", __x, ptr); break; \
case 4: __put_user_asm("stw", __x, ptr); break; \
case 8: STD_USER(__x, ptr); break; \
default: BUILD_BUG(); \
} \
\
__pu_err; \
})
#define __put_user(x, ptr) \
({ \
load_sr2(); \
__put_user_internal(x, ptr); \
})
/*
* The "__put_user/kernel_asm()" macros tell gcc they read from memory
* instead of writing. This is because they do not write to any memory
* gcc knows about, so there are no aliasing issues. These macros must
* also be aware that "fixup_put_user_skip_[12]" are executed in the
* context of the fault, and any registers used there must be listed
* as clobbers. In this case only "r1" is used by the current routines.
* r8/r9 are already listed as err/val.
* also be aware that fixups are executed in the context of the fault,
* and any registers used there must be listed as clobbers.
* r8 is already listed as err.
*/
#define __put_user_asm(stx, x, ptr) \
__asm__ __volatile__ ( \
"\n1:\t" stx "\t%2,0(%%sr2,%1)\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\
"1: " stx " %2,0(%%sr2,%1)\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
: "=r"(__pu_err) \
: "r"(ptr), "r"(x), "0"(__pu_err) \
: "r1")
: "r"(ptr), "r"(x), "0"(__pu_err))
#if !defined(CONFIG_64BIT)
#define __put_user_asm64(__val, ptr) do { \
__asm__ __volatile__ ( \
"\n1:\tstw %2,0(%%sr2,%1)" \
"\n2:\tstw %R2,4(%%sr2,%1)\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\
ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\
"1: stw %2,0(%%sr2,%1)\n" \
"2: stw %R2,4(%%sr2,%1)\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
: "=r"(__pu_err) \
: "r"(ptr), "r"(__val), "0"(__pu_err) \
: "r1"); \
: "r"(ptr), "r"(__val), "0"(__pu_err)); \
} while (0)
#endif /* !defined(CONFIG_64BIT) */


@@ -47,16 +47,6 @@ EXPORT_SYMBOL(__cmpxchg_u64);
EXPORT_SYMBOL(lclear_user);
EXPORT_SYMBOL(lstrnlen_user);
/* Global fixups - defined as int to avoid creation of function pointers */
extern int fixup_get_user_skip_1;
extern int fixup_get_user_skip_2;
extern int fixup_put_user_skip_1;
extern int fixup_put_user_skip_2;
EXPORT_SYMBOL(fixup_get_user_skip_1);
EXPORT_SYMBOL(fixup_get_user_skip_2);
EXPORT_SYMBOL(fixup_put_user_skip_1);
EXPORT_SYMBOL(fixup_put_user_skip_2);
#ifndef CONFIG_64BIT
/* Needed so insmod can set dp value */
extern int $global$;


@@ -143,6 +143,8 @@ void machine_power_off(void)
printk(KERN_EMERG "System shut down completed.\n"
"Please power this system off now.");
/* prevent soft lockup/stalled CPU messages for endless loop. */
rcu_sysrq_start();
for (;;);
}


@@ -2,7 +2,7 @@
# Makefile for parisc-specific library files
#
lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \
lib-y := lusercopy.o bitops.o checksum.o io.o memset.o memcpy.o \
ucmpdi2.o delay.o
obj-y := iomap.o


@@ -1,98 +0,0 @@
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Copyright (C) 2004 Randolph Chung <tausq@debian.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Fixup routines for kernel exception handling.
*/
#include <asm/asm-offsets.h>
#include <asm/assembly.h>
#include <asm/errno.h>
#include <linux/linkage.h>
#ifdef CONFIG_SMP
.macro get_fault_ip t1 t2
loadgp
addil LT%__per_cpu_offset,%r27
LDREG RT%__per_cpu_offset(%r1),\t1
/* t2 = smp_processor_id() */
mfctl 30,\t2
ldw TI_CPU(\t2),\t2
#ifdef CONFIG_64BIT
extrd,u \t2,63,32,\t2
#endif
/* t2 = &__per_cpu_offset[smp_processor_id()]; */
LDREGX \t2(\t1),\t2
addil LT%exception_data,%r27
LDREG RT%exception_data(%r1),\t1
/* t1 = this_cpu_ptr(&exception_data) */
add,l \t1,\t2,\t1
/* %r27 = t1->fault_gp - restore gp */
LDREG EXCDATA_GP(\t1), %r27
/* t1 = t1->fault_ip */
LDREG EXCDATA_IP(\t1), \t1
.endm
#else
.macro get_fault_ip t1 t2
loadgp
/* t1 = this_cpu_ptr(&exception_data) */
addil LT%exception_data,%r27
LDREG RT%exception_data(%r1),\t2
/* %r27 = t2->fault_gp - restore gp */
LDREG EXCDATA_GP(\t2), %r27
/* t1 = t2->fault_ip */
LDREG EXCDATA_IP(\t2), \t1
.endm
#endif
.level LEVEL
.text
.section .fixup, "ax"
/* get_user() fixups, store -EFAULT in r8, and 0 in r9 */
ENTRY_CFI(fixup_get_user_skip_1)
get_fault_ip %r1,%r8
ldo 4(%r1), %r1
ldi -EFAULT, %r8
bv %r0(%r1)
copy %r0, %r9
ENDPROC_CFI(fixup_get_user_skip_1)
ENTRY_CFI(fixup_get_user_skip_2)
get_fault_ip %r1,%r8
ldo 8(%r1), %r1
ldi -EFAULT, %r8
bv %r0(%r1)
copy %r0, %r9
ENDPROC_CFI(fixup_get_user_skip_2)
/* put_user() fixups, store -EFAULT in r8 */
ENTRY_CFI(fixup_put_user_skip_1)
get_fault_ip %r1,%r8
ldo 4(%r1), %r1
bv %r0(%r1)
ldi -EFAULT, %r8
ENDPROC_CFI(fixup_put_user_skip_1)
ENTRY_CFI(fixup_put_user_skip_2)
get_fault_ip %r1,%r8
ldo 8(%r1), %r1
bv %r0(%r1)
ldi -EFAULT, %r8
ENDPROC_CFI(fixup_put_user_skip_2)


@@ -5,6 +5,8 @@
* Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
* Copyright (C) 2001 Matthieu Delahaye <delahaym at esiee.fr>
* Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org>
* Copyright (C) 2017 Helge Deller <deller@gmx.de>
* Copyright (C) 2017 John David Anglin <dave.anglin@bell.net>
*
*
* This program is free software; you can redistribute it and/or modify
@@ -132,4 +134,321 @@ ENDPROC_CFI(lstrnlen_user)
.procend
/*
* unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
*
* Inputs:
* - sr1 already contains space of source region
* - sr2 already contains space of destination region
*
* Returns:
* - number of bytes that could not be copied.
* On success, this will be zero.
*
* This code is based on a C implementation of a copy routine written by
* Randolph Chung, which in turn was derived from glibc.
*
* Several strategies are tried to get the best performance for various
* conditions. In the optimal case, we copy by loops that copy 32 or 16 bytes
* at a time using general registers. Unaligned copies are handled either by
* aligning the destination and then using a shift-and-write method, or in a few
* cases by falling back to a byte-at-a-time copy.
*
* Testing with various alignments and buffer sizes shows that this code is
* often >10x faster than a simple byte-at-a-time copy, even for strangely
* aligned operands. It is interesting to note that the glibc version of memcpy
* (written in C) is actually quite fast already. This routine is able to beat
* it by 30-40% for aligned copies because of the loop unrolling, but in some
* cases the glibc version is still slightly faster. This lends more
* credibility that gcc can generate very good code as long as we are careful.
*
* Possible optimizations:
* - add cache prefetching
* - try not to use the post-increment address modifiers; they may create
* additional interlocks. Assumption is that those were only efficient on old
* machines (pre PA8000 processors)
*/
dst = arg0
src = arg1
len = arg2
end = arg3
t1 = r19
t2 = r20
t3 = r21
t4 = r22
srcspc = sr1
dstspc = sr2
t0 = r1
a1 = t1
a2 = t2
a3 = t3
a0 = t4
save_src = ret0
save_dst = ret1
save_len = r31
ENTRY_CFI(pa_memcpy)
.proc
.callinfo NO_CALLS
.entry
/* Last destination address */
add dst,len,end
/* short copy with less than 16 bytes? */
cmpib,COND(>>=),n 15,len,.Lbyte_loop
/* same alignment? */
xor src,dst,t0
extru t0,31,2,t1
cmpib,<>,n 0,t1,.Lunaligned_copy
#ifdef CONFIG_64BIT
/* only do 64-bit copies if we can get aligned. */
extru t0,31,3,t1
cmpib,<>,n 0,t1,.Lalign_loop32
/* loop until we are 64-bit aligned */
.Lalign_loop64:
extru dst,31,3,t1
cmpib,=,n 0,t1,.Lcopy_loop_16_start
20: ldb,ma 1(srcspc,src),t1
21: stb,ma t1,1(dstspc,dst)
b .Lalign_loop64
ldo -1(len),len
ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
.Lcopy_loop_16_start:
ldi 31,t0
.Lcopy_loop_16:
cmpb,COND(>>=),n t0,len,.Lword_loop
10: ldd 0(srcspc,src),t1
11: ldd 8(srcspc,src),t2
ldo 16(src),src
12: std,ma t1,8(dstspc,dst)
13: std,ma t2,8(dstspc,dst)
14: ldd 0(srcspc,src),t1
15: ldd 8(srcspc,src),t2
ldo 16(src),src
16: std,ma t1,8(dstspc,dst)
17: std,ma t2,8(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy16_fault)
ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy16_fault)
ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
b .Lcopy_loop_16
ldo -32(len),len
.Lword_loop:
cmpib,COND(>>=),n 3,len,.Lbyte_loop
20: ldw,ma 4(srcspc,src),t1
21: stw,ma t1,4(dstspc,dst)
b .Lword_loop
ldo -4(len),len
ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
#endif /* CONFIG_64BIT */
/* loop until we are 32-bit aligned */
.Lalign_loop32:
extru dst,31,2,t1
cmpib,=,n 0,t1,.Lcopy_loop_8
20: ldb,ma 1(srcspc,src),t1
21: stb,ma t1,1(dstspc,dst)
b .Lalign_loop32
ldo -1(len),len
ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
.Lcopy_loop_8:
cmpib,COND(>>=),n 15,len,.Lbyte_loop
10: ldw 0(srcspc,src),t1
11: ldw 4(srcspc,src),t2
12: stw,ma t1,4(dstspc,dst)
13: stw,ma t2,4(dstspc,dst)
14: ldw 8(srcspc,src),t1
15: ldw 12(srcspc,src),t2
ldo 16(src),src
16: stw,ma t1,4(dstspc,dst)
17: stw,ma t2,4(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy8_fault)
ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy8_fault)
ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
b .Lcopy_loop_8
ldo -16(len),len
.Lbyte_loop:
cmpclr,COND(<>) len,%r0,%r0
b,n .Lcopy_done
20: ldb 0(srcspc,src),t1
ldo 1(src),src
21: stb,ma t1,1(dstspc,dst)
b .Lbyte_loop
ldo -1(len),len
ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
.Lcopy_done:
bv %r0(%r2)
sub end,dst,ret0
/* src and dst are not aligned the same way. */
/* need to go the hard way */
.Lunaligned_copy:
/* align until dst is 32bit-word-aligned */
extru dst,31,2,t1
cmpib,=,n 0,t1,.Lcopy_dstaligned
20: ldb 0(srcspc,src),t1
ldo 1(src),src
21: stb,ma t1,1(dstspc,dst)
b .Lunaligned_copy
ldo -1(len),len
ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
.Lcopy_dstaligned:
/* store src, dst and len in safe place */
copy src,save_src
copy dst,save_dst
copy len,save_len
/* len now needs to give the number of words to copy */
SHRREG len,2,len
/*
* Copy from a not-aligned src to an aligned dst using shifts.
* Handles 4 words per loop.
*/
depw,z src,28,2,t0
subi 32,t0,t0
mtsar t0
extru len,31,2,t0
cmpib,= 2,t0,.Lcase2
/* Make src aligned by rounding it down. */
depi 0,31,2,src
cmpiclr,<> 3,t0,%r0
b,n .Lcase3
cmpiclr,<> 1,t0,%r0
b,n .Lcase1
.Lcase0:
cmpb,COND(=) %r0,len,.Lcda_finish
nop
1: ldw,ma 4(srcspc,src), a3
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
1: ldw,ma 4(srcspc,src), a0
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
b,n .Ldo3
.Lcase1:
1: ldw,ma 4(srcspc,src), a2
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
1: ldw,ma 4(srcspc,src), a3
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
ldo -1(len),len
cmpb,COND(=),n %r0,len,.Ldo0
.Ldo4:
1: ldw,ma 4(srcspc,src), a0
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
shrpw a2, a3, %sar, t0
1: stw,ma t0, 4(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
.Ldo3:
1: ldw,ma 4(srcspc,src), a1
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
shrpw a3, a0, %sar, t0
1: stw,ma t0, 4(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
.Ldo2:
1: ldw,ma 4(srcspc,src), a2
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
shrpw a0, a1, %sar, t0
1: stw,ma t0, 4(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
.Ldo1:
1: ldw,ma 4(srcspc,src), a3
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
shrpw a1, a2, %sar, t0
1: stw,ma t0, 4(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
ldo -4(len),len
cmpb,COND(<>) %r0,len,.Ldo4
nop
.Ldo0:
shrpw a2, a3, %sar, t0
1: stw,ma t0, 4(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
.Lcda_rdfault:
.Lcda_finish:
/* calculate new src, dst and len and jump to byte-copy loop */
sub dst,save_dst,t0
add save_src,t0,src
b .Lbyte_loop
sub save_len,t0,len
.Lcase3:
1: ldw,ma 4(srcspc,src), a0
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
1: ldw,ma 4(srcspc,src), a1
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
b .Ldo2
ldo 1(len),len
.Lcase2:
1: ldw,ma 4(srcspc,src), a1
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
1: ldw,ma 4(srcspc,src), a2
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
b .Ldo1
ldo 2(len),len
/* fault exception fixup handlers: */
#ifdef CONFIG_64BIT
.Lcopy16_fault:
b .Lcopy_done
10: std,ma t1,8(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
#endif
.Lcopy8_fault:
b .Lcopy_done
10: stw,ma t1,4(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
.exit
ENDPROC_CFI(pa_memcpy)
.procend
.end
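A small C sketch (not in the patch) of the calling convention stated in the header comment of pa_memcpy() above: the return value is the number of bytes that could not be copied, so zero means complete success. copied_bytes() is an invented helper.

/* Sketch: pa_memcpy() returns bytes NOT copied; 0 == full success. */
static unsigned long copied_bytes(void *dst, const void *src,
				  unsigned long len)
{
	unsigned long not_copied = pa_memcpy(dst, src, len);

	return len - not_copied;	/* bytes actually transferred */
}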


@@ -2,7 +2,7 @@
* Optimized memory copy routines.
*
* Copyright (C) 2004 Randolph Chung <tausq@debian.org>
* Copyright (C) 2013 Helge Deller <deller@gmx.de>
* Copyright (C) 2013-2017 Helge Deller <deller@gmx.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,474 +21,21 @@
* Portions derived from the GNU C Library
* Copyright (C) 1991, 1997, 2003 Free Software Foundation, Inc.
*
* Several strategies are tried to try to get the best performance for various
* conditions. In the optimal case, we copy 64-bytes in an unrolled loop using
* fp regs. This is followed by loops that copy 32- or 16-bytes at a time using
* general registers. Unaligned copies are handled either by aligning the
* destination and then using shift-and-write method, or in a few cases by
* falling back to a byte-at-a-time copy.
*
* I chose to implement this in C because it is easier to maintain and debug,
* and in my experiments it appears that the C code generated by gcc (3.3/3.4
* at the time of writing) is fairly optimal. Unfortunately some of the
* semantics of the copy routine (exception handling) is difficult to express
* in C, so we have to play some tricks to get it to work.
*
* All the loads and stores are done via explicit asm() code in order to use
* the right space registers.
*
* Testing with various alignments and buffer sizes shows that this code is
* often >10x faster than a simple byte-at-a-time copy, even for strangely
* aligned operands. It is interesting to note that the glibc version
* of memcpy (written in C) is actually quite fast already. This routine is
* able to beat it by 30-40% for aligned copies because of the loop unrolling,
* but in some cases the glibc version is still slightly faster. This lends
* more credibility that gcc can generate very good code as long as we are
* careful.
*
* TODO:
* - cache prefetching needs more experimentation to get optimal settings
* - try not to use the post-increment address modifiers; they create additional
* interlocks
* - replace byte-copy loops with stybs sequences
*/
#ifdef __KERNEL__
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#define s_space "%%sr1"
#define d_space "%%sr2"
#else
#include "memcpy.h"
#define s_space "%%sr0"
#define d_space "%%sr0"
#define pa_memcpy new2_copy
#endif
DECLARE_PER_CPU(struct exception_data, exception_data);
#define preserve_branch(label) do { \
volatile int dummy = 0; \
/* The following branch is never taken, it's just here to */ \
/* prevent gcc from optimizing away our exception code. */ \
if (unlikely(dummy != dummy)) \
goto label; \
} while (0)
#define get_user_space() (segment_eq(get_fs(), KERNEL_DS) ? 0 : mfsp(3))
#define get_kernel_space() (0)
#define MERGE(w0, sh_1, w1, sh_2) ({ \
unsigned int _r; \
asm volatile ( \
"mtsar %3\n" \
"shrpw %1, %2, %%sar, %0\n" \
: "=r"(_r) \
: "r"(w0), "r"(w1), "r"(sh_2) \
); \
_r; \
})
#define THRESHOLD 16
#ifdef DEBUG_MEMCPY
#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
#else
#define DPRINTF(fmt, args...)
#endif
#define def_load_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \
__asm__ __volatile__ ( \
"1:\t" #_insn ",ma " #_sz "(" _s ",%1), %0\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
: _tt(_t), "+r"(_a) \
: \
: "r8")
#define def_store_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \
__asm__ __volatile__ ( \
"1:\t" #_insn ",ma %1, " #_sz "(" _s ",%0)\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
: "+r"(_a) \
: _tt(_t) \
: "r8")
#define ldbma(_s, _a, _t, _e) def_load_ai_insn(ldbs,1,"=r",_s,_a,_t,_e)
#define stbma(_s, _t, _a, _e) def_store_ai_insn(stbs,1,"r",_s,_a,_t,_e)
#define ldwma(_s, _a, _t, _e) def_load_ai_insn(ldw,4,"=r",_s,_a,_t,_e)
#define stwma(_s, _t, _a, _e) def_store_ai_insn(stw,4,"r",_s,_a,_t,_e)
#define flddma(_s, _a, _t, _e) def_load_ai_insn(fldd,8,"=f",_s,_a,_t,_e)
#define fstdma(_s, _t, _a, _e) def_store_ai_insn(fstd,8,"f",_s,_a,_t,_e)
#define def_load_insn(_insn,_tt,_s,_o,_a,_t,_e) \
__asm__ __volatile__ ( \
"1:\t" #_insn " " #_o "(" _s ",%1), %0\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
: _tt(_t) \
: "r"(_a) \
: "r8")
#define def_store_insn(_insn,_tt,_s,_t,_o,_a,_e) \
__asm__ __volatile__ ( \
"1:\t" #_insn " %0, " #_o "(" _s ",%1)\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
: \
: _tt(_t), "r"(_a) \
: "r8")
#define ldw(_s,_o,_a,_t,_e) def_load_insn(ldw,"=r",_s,_o,_a,_t,_e)
#define stw(_s,_t,_o,_a,_e) def_store_insn(stw,"r",_s,_t,_o,_a,_e)
#ifdef CONFIG_PREFETCH
static inline void prefetch_src(const void *addr)
{
__asm__("ldw 0(" s_space ",%0), %%r0" : : "r" (addr));
}
static inline void prefetch_dst(const void *addr)
{
__asm__("ldd 0(" d_space ",%0), %%r0" : : "r" (addr));
}
#else
#define prefetch_src(addr) do { } while(0)
#define prefetch_dst(addr) do { } while(0)
#endif
#define PA_MEMCPY_OK 0
#define PA_MEMCPY_LOAD_ERROR 1
#define PA_MEMCPY_STORE_ERROR 2
/* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words
* per loop. This code is derived from glibc.
*/
static noinline unsigned long copy_dstaligned(unsigned long dst,
unsigned long src, unsigned long len)
{
/* gcc complains that a2 and a3 may be uninitialized, but actually
* they cannot be. Initialize a2/a3 to shut gcc up.
*/
register unsigned int a0, a1, a2 = 0, a3 = 0;
int sh_1, sh_2;
/* prefetch_src((const void *)src); */
/* Calculate how to shift a word read at the memory operation
aligned srcp to make it aligned for copy. */
sh_1 = 8 * (src % sizeof(unsigned int));
sh_2 = 8 * sizeof(unsigned int) - sh_1;
/* Make src aligned by rounding it down. */
src &= -sizeof(unsigned int);
switch (len % 4)
{
case 2:
/* a1 = ((unsigned int *) src)[0];
a2 = ((unsigned int *) src)[1]; */
ldw(s_space, 0, src, a1, cda_ldw_exc);
ldw(s_space, 4, src, a2, cda_ldw_exc);
src -= 1 * sizeof(unsigned int);
dst -= 3 * sizeof(unsigned int);
len += 2;
goto do1;
case 3:
/* a0 = ((unsigned int *) src)[0];
a1 = ((unsigned int *) src)[1]; */
ldw(s_space, 0, src, a0, cda_ldw_exc);
ldw(s_space, 4, src, a1, cda_ldw_exc);
src -= 0 * sizeof(unsigned int);
dst -= 2 * sizeof(unsigned int);
len += 1;
goto do2;
case 0:
if (len == 0)
return PA_MEMCPY_OK;
/* a3 = ((unsigned int *) src)[0];
a0 = ((unsigned int *) src)[1]; */
ldw(s_space, 0, src, a3, cda_ldw_exc);
ldw(s_space, 4, src, a0, cda_ldw_exc);
src -=-1 * sizeof(unsigned int);
dst -= 1 * sizeof(unsigned int);
len += 0;
goto do3;
case 1:
/* a2 = ((unsigned int *) src)[0];
a3 = ((unsigned int *) src)[1]; */
ldw(s_space, 0, src, a2, cda_ldw_exc);
ldw(s_space, 4, src, a3, cda_ldw_exc);
src -=-2 * sizeof(unsigned int);
dst -= 0 * sizeof(unsigned int);
len -= 1;
if (len == 0)
goto do0;
goto do4; /* No-op. */
}
do
{
/* prefetch_src((const void *)(src + 4 * sizeof(unsigned int))); */
do4:
/* a0 = ((unsigned int *) src)[0]; */
ldw(s_space, 0, src, a0, cda_ldw_exc);
/* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */
stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc);
do3:
/* a1 = ((unsigned int *) src)[1]; */
ldw(s_space, 4, src, a1, cda_ldw_exc);
/* ((unsigned int *) dst)[1] = MERGE (a3, sh_1, a0, sh_2); */
stw(d_space, MERGE (a3, sh_1, a0, sh_2), 4, dst, cda_stw_exc);
do2:
/* a2 = ((unsigned int *) src)[2]; */
ldw(s_space, 8, src, a2, cda_ldw_exc);
/* ((unsigned int *) dst)[2] = MERGE (a0, sh_1, a1, sh_2); */
stw(d_space, MERGE (a0, sh_1, a1, sh_2), 8, dst, cda_stw_exc);
do1:
/* a3 = ((unsigned int *) src)[3]; */
ldw(s_space, 12, src, a3, cda_ldw_exc);
/* ((unsigned int *) dst)[3] = MERGE (a1, sh_1, a2, sh_2); */
stw(d_space, MERGE (a1, sh_1, a2, sh_2), 12, dst, cda_stw_exc);
src += 4 * sizeof(unsigned int);
dst += 4 * sizeof(unsigned int);
len -= 4;
}
while (len != 0);
do0:
/* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */
stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc);
preserve_branch(handle_load_error);
preserve_branch(handle_store_error);
return PA_MEMCPY_OK;
handle_load_error:
__asm__ __volatile__ ("cda_ldw_exc:\n");
return PA_MEMCPY_LOAD_ERROR;
handle_store_error:
__asm__ __volatile__ ("cda_stw_exc:\n");
return PA_MEMCPY_STORE_ERROR;
}
/* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR.
* In case of an access fault the faulty address can be read from the per_cpu
* exception data struct. */
static noinline unsigned long pa_memcpy_internal(void *dstp, const void *srcp,
unsigned long len)
{
register unsigned long src, dst, t1, t2, t3;
register unsigned char *pcs, *pcd;
register unsigned int *pws, *pwd;
register double *pds, *pdd;
unsigned long ret;
src = (unsigned long)srcp;
dst = (unsigned long)dstp;
pcs = (unsigned char *)srcp;
pcd = (unsigned char *)dstp;
/* prefetch_src((const void *)srcp); */
if (len < THRESHOLD)
goto byte_copy;
/* Check alignment */
t1 = (src ^ dst);
if (unlikely(t1 & (sizeof(double)-1)))
goto unaligned_copy;
/* src and dst have same alignment. */
/* Copy bytes till we are double-aligned. */
t2 = src & (sizeof(double) - 1);
if (unlikely(t2 != 0)) {
t2 = sizeof(double) - t2;
while (t2 && len) {
/* *pcd++ = *pcs++; */
ldbma(s_space, pcs, t3, pmc_load_exc);
len--;
stbma(d_space, t3, pcd, pmc_store_exc);
t2--;
}
}
pds = (double *)pcs;
pdd = (double *)pcd;
#if 0
/* Copy 8 doubles at a time */
while (len >= 8*sizeof(double)) {
register double r1, r2, r3, r4, r5, r6, r7, r8;
/* prefetch_src((char *)pds + L1_CACHE_BYTES); */
flddma(s_space, pds, r1, pmc_load_exc);
flddma(s_space, pds, r2, pmc_load_exc);
flddma(s_space, pds, r3, pmc_load_exc);
flddma(s_space, pds, r4, pmc_load_exc);
fstdma(d_space, r1, pdd, pmc_store_exc);
fstdma(d_space, r2, pdd, pmc_store_exc);
fstdma(d_space, r3, pdd, pmc_store_exc);
fstdma(d_space, r4, pdd, pmc_store_exc);
#if 0
if (L1_CACHE_BYTES <= 32)
prefetch_src((char *)pds + L1_CACHE_BYTES);
#endif
flddma(s_space, pds, r5, pmc_load_exc);
flddma(s_space, pds, r6, pmc_load_exc);
flddma(s_space, pds, r7, pmc_load_exc);
flddma(s_space, pds, r8, pmc_load_exc);
fstdma(d_space, r5, pdd, pmc_store_exc);
fstdma(d_space, r6, pdd, pmc_store_exc);
fstdma(d_space, r7, pdd, pmc_store_exc);
fstdma(d_space, r8, pdd, pmc_store_exc);
len -= 8*sizeof(double);
}
#endif
pws = (unsigned int *)pds;
pwd = (unsigned int *)pdd;
word_copy:
while (len >= 8*sizeof(unsigned int)) {
register unsigned int r1,r2,r3,r4,r5,r6,r7,r8;
/* prefetch_src((char *)pws + L1_CACHE_BYTES); */
ldwma(s_space, pws, r1, pmc_load_exc);
ldwma(s_space, pws, r2, pmc_load_exc);
ldwma(s_space, pws, r3, pmc_load_exc);
ldwma(s_space, pws, r4, pmc_load_exc);
stwma(d_space, r1, pwd, pmc_store_exc);
stwma(d_space, r2, pwd, pmc_store_exc);
stwma(d_space, r3, pwd, pmc_store_exc);
stwma(d_space, r4, pwd, pmc_store_exc);
ldwma(s_space, pws, r5, pmc_load_exc);
ldwma(s_space, pws, r6, pmc_load_exc);
ldwma(s_space, pws, r7, pmc_load_exc);
ldwma(s_space, pws, r8, pmc_load_exc);
stwma(d_space, r5, pwd, pmc_store_exc);
stwma(d_space, r6, pwd, pmc_store_exc);
stwma(d_space, r7, pwd, pmc_store_exc);
stwma(d_space, r8, pwd, pmc_store_exc);
len -= 8*sizeof(unsigned int);
}
while (len >= 4*sizeof(unsigned int)) {
register unsigned int r1,r2,r3,r4;
ldwma(s_space, pws, r1, pmc_load_exc);
ldwma(s_space, pws, r2, pmc_load_exc);
ldwma(s_space, pws, r3, pmc_load_exc);
ldwma(s_space, pws, r4, pmc_load_exc);
stwma(d_space, r1, pwd, pmc_store_exc);
stwma(d_space, r2, pwd, pmc_store_exc);
stwma(d_space, r3, pwd, pmc_store_exc);
stwma(d_space, r4, pwd, pmc_store_exc);
len -= 4*sizeof(unsigned int);
}
pcs = (unsigned char *)pws;
pcd = (unsigned char *)pwd;
byte_copy:
while (len) {
/* *pcd++ = *pcs++; */
ldbma(s_space, pcs, t3, pmc_load_exc);
stbma(d_space, t3, pcd, pmc_store_exc);
len--;
}
return PA_MEMCPY_OK;
unaligned_copy:
/* possibly we are aligned on a word, but not on a double... */
if (likely((t1 & (sizeof(unsigned int)-1)) == 0)) {
t2 = src & (sizeof(unsigned int) - 1);
if (unlikely(t2 != 0)) {
t2 = sizeof(unsigned int) - t2;
while (t2) {
/* *pcd++ = *pcs++; */
ldbma(s_space, pcs, t3, pmc_load_exc);
stbma(d_space, t3, pcd, pmc_store_exc);
len--;
t2--;
}
}
pws = (unsigned int *)pcs;
pwd = (unsigned int *)pcd;
goto word_copy;
}
/* Align the destination. */
if (unlikely((dst & (sizeof(unsigned int) - 1)) != 0)) {
t2 = sizeof(unsigned int) - (dst & (sizeof(unsigned int) - 1));
while (t2) {
/* *pcd++ = *pcs++; */
ldbma(s_space, pcs, t3, pmc_load_exc);
stbma(d_space, t3, pcd, pmc_store_exc);
len--;
t2--;
}
dst = (unsigned long)pcd;
src = (unsigned long)pcs;
}
ret = copy_dstaligned(dst, src, len / sizeof(unsigned int));
if (ret)
return ret;
pcs += (len & -sizeof(unsigned int));
pcd += (len & -sizeof(unsigned int));
len %= sizeof(unsigned int);
preserve_branch(handle_load_error);
preserve_branch(handle_store_error);
goto byte_copy;
handle_load_error:
__asm__ __volatile__ ("pmc_load_exc:\n");
return PA_MEMCPY_LOAD_ERROR;
handle_store_error:
__asm__ __volatile__ ("pmc_store_exc:\n");
return PA_MEMCPY_STORE_ERROR;
}
/* Returns 0 for success, otherwise, returns number of bytes not transferred. */
static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
{
unsigned long ret, fault_addr, reference;
struct exception_data *d;
extern unsigned long pa_memcpy(void *dst, const void *src,
unsigned long len);
ret = pa_memcpy_internal(dstp, srcp, len);
if (likely(ret == PA_MEMCPY_OK))
return 0;
/* if a load or store fault occured we can get the faulty addr */
d = this_cpu_ptr(&exception_data);
fault_addr = d->fault_addr;
/* error in load or store? */
if (ret == PA_MEMCPY_LOAD_ERROR)
reference = (unsigned long) srcp;
else
reference = (unsigned long) dstp;
DPRINTF("pa_memcpy: fault type = %lu, len=%lu fault_addr=%lu ref=%lu\n",
ret, len, fault_addr, reference);
if (fault_addr >= reference)
return len - (fault_addr - reference);
else
return len;
}
#ifdef __KERNEL__
unsigned long __copy_to_user(void __user *dst, const void *src,
unsigned long len)
{
@@ -537,5 +84,3 @@ long probe_kernel_read(void *dst, const void *src, size_t size)
return __probe_kernel_read(dst, src, size);
}
#endif


@@ -150,6 +150,23 @@ int fixup_exception(struct pt_regs *regs)
d->fault_space = regs->isr;
d->fault_addr = regs->ior;
/*
* Fix up get_user() and put_user().
* ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant
* bit in the relative address of the fixup routine to indicate
* that %r8 should be loaded with -EFAULT to report a userspace
* access error.
*/
if (fix->fixup & 1) {
regs->gr[8] = -EFAULT;
/* zero target register for get_user() */
if (parisc_acctyp(0, regs->iir) == VM_READ) {
int treg = regs->iir & 0x1f;
regs->gr[treg] = 0;
}
}
regs->iaoq[0] = (unsigned long)&fix->fixup + fix->fixup;
regs->iaoq[0] &= ~3;
/*
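To make the tagging trick concrete, a hedged sketch (invented names, not kernel code) of the decode applied to a fixup entry: bit 0 selects the -EFAULT behaviour, and the low bits are masked off before resuming because instructions are 4-byte aligned.

/* Sketch of the decode performed above on fix->fixup. */
static unsigned long resolve_fixup(long fixup_offset,
				   unsigned long entry_addr,
				   int *wants_efault)
{
	*wants_efault = fixup_offset & 1;	/* tag from ..._EFAULT() */

	/* relative target, then strip tag bits: code is 4-byte aligned */
	return (entry_addr + fixup_offset) & ~3UL;
}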


@@ -33,10 +33,13 @@ static u32 crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len)
}
if (len & ~VMX_ALIGN_MASK) {
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK);
disable_kernel_altivec();
pagefault_enable();
preempt_enable();
}
tail = len & VMX_ALIGN_MASK;


@@ -236,9 +236,9 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
mtctr reg; \
bctr
#define BRANCH_LINK_TO_FAR(reg, label) \
__LOAD_FAR_HANDLER(reg, label); \
mtctr reg; \
#define BRANCH_LINK_TO_FAR(label) \
__LOAD_FAR_HANDLER(r12, label); \
mtctr r12; \
bctrl
/*
@@ -265,7 +265,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
#define BRANCH_TO_COMMON(reg, label) \
b label
#define BRANCH_LINK_TO_FAR(reg, label) \
#define BRANCH_LINK_TO_FAR(label) \
bl label
#define BRANCH_TO_KVM(reg, label) \


@@ -807,14 +807,25 @@ int fix_alignment(struct pt_regs *regs)
nb = aligninfo[instr].len;
flags = aligninfo[instr].flags;
/* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
nb = 8;
flags = LD+SW;
} else if (IS_XFORM(instruction) &&
((instruction >> 1) & 0x3ff) == 660) {
nb = 8;
flags = ST+SW;
/*
* Handle some cases which give overlaps in the DSISR values.
*/
if (IS_XFORM(instruction)) {
switch (get_xop(instruction)) {
case 532: /* ldbrx */
nb = 8;
flags = LD+SW;
break;
case 660: /* stdbrx */
nb = 8;
flags = ST+SW;
break;
case 20: /* lwarx */
case 84: /* ldarx */
case 116: /* lharx */
case 276: /* lqarx */
return 0; /* not emulated ever */
}
}
/* Byteswap little endian loads and stores */
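For reference, a hedged sketch (not from this patch) of the extraction get_xop() performs for X-form instructions, matching the open-coded (instruction >> 1) & 0x3ff in the removed lines:

/* Sketch: the X-form extended opcode sits in bits 10..1 of the
 * instruction word (IBM bits 21-30), just above the record (Rc) bit.
 */
static inline unsigned int get_xop_sketch(unsigned int instr)
{
	return (instr >> 1) & 0x3ff;	/* 532 = ldbrx, 660 = stdbrx, ... */
}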


@@ -689,7 +689,7 @@ resume_kernel:
addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
lwz r3,GPR1(r1)
ld r3,GPR1(r1)
subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
mr r4,r1 /* src: current exception frame */
mr r1,r3 /* Reroute the trampoline frame to r1 */
@@ -703,8 +703,8 @@ resume_kernel:
addi r6,r6,8
bdnz 2b
/* Do real store operation to complete stwu */
lwz r5,GPR1(r1)
/* Do real store operation to complete stdu */
ld r5,GPR1(r1)
std r8,0(r5)
/* Clear _TIF_EMULATE_STACK_STORE flag */


@@ -982,7 +982,7 @@ TRAMP_REAL_BEGIN(hmi_exception_early)
EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
EXCEPTION_PROLOG_COMMON_3(0xe60)
addi r3,r1,STACK_FRAME_OVERHEAD
BRANCH_LINK_TO_FAR(r4, hmi_exception_realmode)
BRANCH_LINK_TO_FAR(hmi_exception_realmode) /* Function call ABI */
/* Windup the stack. */
/* Move original HSRR0 and HSRR1 into the respective regs */
ld r9,_MSR(r1)


@@ -67,7 +67,7 @@ PPC64_CACHES:
* flush all bytes from start through stop-1 inclusive
*/
_GLOBAL(flush_icache_range)
_GLOBAL_TOC(flush_icache_range)
BEGIN_FTR_SECTION
PURGE_PREFETCHED_INS
blr
@@ -120,7 +120,7 @@ EXPORT_SYMBOL(flush_icache_range)
*
* flush all bytes from start to stop-1 inclusive
*/
_GLOBAL(flush_dcache_range)
_GLOBAL_TOC(flush_dcache_range)
/*
* Flush the data cache to memory


@@ -236,6 +236,15 @@ static void cpu_ready_for_interrupts(void)
mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
}
/*
* Fixup HFSCR:TM based on CPU features. The bit is set by our
* early asm init because at that point we haven't updated our
* CPU features from firmware and device-tree. Here we have,
* so let's do it.
*/
if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP))
mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
/* Set IR and DR in PACA MSR */
get_paca()->kernel_msr = MSR_KERNEL;
}


@@ -1487,6 +1487,10 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
/* start new resize */
resize = kzalloc(sizeof(*resize), GFP_KERNEL);
if (!resize) {
ret = -ENOMEM;
goto out;
}
resize->order = shift;
resize->kvm = kvm;
INIT_WORK(&resize->work, resize_hpt_prepare_work);


@@ -638,6 +638,10 @@ static void native_flush_hash_range(unsigned long number, int local)
unsigned long psize = batch->psize;
int ssize = batch->ssize;
int i;
unsigned int use_local;
use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();
local_irq_save(flags);
@@ -667,8 +671,7 @@ static void native_flush_hash_range(unsigned long number, int local)
} pte_iterate_hashed_end();
}
if (mmu_has_feature(MMU_FTR_TLBIEL) &&
mmu_psize_defs[psize].tlbiel && local) {
if (use_local) {
asm volatile("ptesync":::"memory");
for (i = 0; i < number; i++) {
vpn = batch->vpn[i];


@@ -141,31 +141,34 @@ static void check_ipl_parmblock(void *start, unsigned long size)
unsigned long decompress_kernel(void)
{
unsigned long output_addr;
unsigned char *output;
void *output, *kernel_end;
output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL;
check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start);
memset(&_bss, 0, &_ebss - &_bss);
free_mem_ptr = (unsigned long)&_end;
free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
output = (unsigned char *) output_addr;
output = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, PAGE_SIZE);
kernel_end = output + SZ__bss_start;
check_ipl_parmblock((void *) 0, (unsigned long) kernel_end);
#ifdef CONFIG_BLK_DEV_INITRD
/*
* Move the initrd right behind the end of the decompressed
* kernel image.
* kernel image. This also prevents initrd corruption caused by
* bss clearing since kernel_end will always be located behind the
* current bss section.
*/
if (INITRD_START && INITRD_SIZE &&
INITRD_START < (unsigned long) output + SZ__bss_start) {
check_ipl_parmblock(output + SZ__bss_start,
INITRD_START + INITRD_SIZE);
memmove(output + SZ__bss_start,
(void *) INITRD_START, INITRD_SIZE);
INITRD_START = (unsigned long) output + SZ__bss_start;
if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) {
check_ipl_parmblock(kernel_end, INITRD_SIZE);
memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE);
INITRD_START = (unsigned long) kernel_end;
}
#endif
/*
* Clear bss section. free_mem_ptr and free_mem_end_ptr need to be
* initialized afterwards since they reside in bss.
*/
memset(&_bss, 0, &_ebss - &_bss);
free_mem_ptr = (unsigned long) &_end;
free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
puts("Uncompressing Linux... ");
__decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
puts("Ok, booting the kernel.\n");
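As a reading aid (editorial, not patch content), the memory layout the code above sets up, assuming SZ__bss_start is the offset of the decompressed kernel's bss within its image:

/*
 * Low to high addresses:
 *   &_bss .. &_ebss         decompressor bss, cleared last
 *   &_end                   end of the decompressor image
 *   &_end + HEAP_SIZE       decompressor heap
 *   output (page aligned)   decompressed kernel image
 *   output + SZ__bss_start  kernel_end: the initrd is moved here,
 *                           safely beyond the decompressor bss
 */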


@@ -1051,6 +1051,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
{
if (!MACHINE_HAS_NX)
pte_val(entry) &= ~_PAGE_NOEXEC;
if (pte_present(entry))
pte_val(entry) &= ~_PAGE_UNUSED;
if (mm_has_pgste(mm))
ptep_set_pte_at(mm, addr, ptep, entry);
else

Some files were not shown because too many files have changed in this diff Show More