parisc architecture updates for kernel v5.17-rc1
- Fix lpa and lpa_user defines (John David Anglin)
- Fix symbol lookup of init functions with an __is_kernel() fix (Helge Deller)
- Fix wrong pdc_toc_pim_11 and pdc_toc_pim_20 definitions (Helge Deller)
- Add lws_atomic_xchg and lws_atomic_store syscalls (John David Anglin)
- Rewrite light-weight syscall and futex code (John David Anglin)
- Enable TOC (transfer of contents) feature unconditionally (Helge Deller)
- Improve fault handler messages (John David Anglin)
- Improve build process (Masahiro Yamada)
- Reduce kernel code footprint of user access functions (Helge Deller)
- Fix build error due to outX() macros (Bart Van Assche)
- Use default_groups in kobj_type in pdc_stable (Greg Kroah-Hartman)
- Default to 16 CPUs on 32-bit kernel (Helge Deller)
-----BEGIN PGP SIGNATURE-----

iHUEABYIAB0WIQS86RI+GtKfB8BJu973ErUQojoPXwUCYd1tRgAKCRD3ErUQojoP
X+k/AQDqGWQ+EQE15O+t9ZtluQVVRN30qeu3viSfutsj3DitOAEAvdzINTBakJ5N
Rm1Y6b3AZ3oCrjjRR0b2TuWvt+Uxew0=
=R+Ha
-----END PGP SIGNATURE-----

Merge tag 'for-5.17/parisc-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc architecture updates from Helge Deller:

 - Fix lpa and lpa_user defines (John David Anglin)
 - Fix symbol lookup of init functions with an __is_kernel() fix (Helge Deller)
 - Fix wrong pdc_toc_pim_11 and pdc_toc_pim_20 definitions (Helge Deller)
 - Add lws_atomic_xchg and lws_atomic_store syscalls (John David Anglin)
 - Rewrite light-weight syscall and futex code (John David Anglin)
 - Enable TOC (transfer of contents) feature unconditionally (Helge Deller)
 - Improve fault handler messages (John David Anglin)
 - Improve build process (Masahiro Yamada)
 - Reduce kernel code footprint of user access functions (Helge Deller)
 - Fix build error due to outX() macros (Bart Van Assche)
 - Use default_groups in kobj_type in pdc_stable (Greg Kroah-Hartman)
 - Default to 16 CPUs on 32-bit kernel (Helge Deller)

* tag 'for-5.17/parisc-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: Default to 16 CPUs on 32-bit kernel
  sections: Fix __is_kernel() to include init ranges
  parisc: Re-use toc_stack as hpmc_stack
  parisc: Enable TOC (transfer of contents) feature unconditionally
  parisc: io: Improve the outb(), outw() and outl() macros
  parisc: pdc_stable: use default_groups in kobj_type
  parisc: Add kgdb io_module to read chars via PDC
  parisc: Fix pdc_toc_pim_11 and pdc_toc_pim_20 definitions
  parisc: Add lws_atomic_xchg and lws_atomic_store syscalls
  parisc: Rewrite light-weight syscall and futex code
  parisc: Enhance page fault termination message
  parisc: Don't call faulthandler_disabled() in do_page_fault()
  parisc: Switch user access functions to signal errors in r29 instead of r8
  parisc: Avoid calling faulthandler_disabled() twice
  parisc: Fix lpa and lpa_user defines
  parisc: Define depi_safe macro
  parisc: decompressor: do not copy source files while building
commit c1eb8f6cff
arch/parisc/Kconfig
@@ -287,20 +287,6 @@ config SMP
 
 	  If you don't know what to do here, say N.
 
-config TOC
-	bool "Support TOC switch"
-	default y if 64BIT || !SMP
-	help
-	  Most PA-RISC machines have either a switch at the back of the machine
-	  or a command in BMC to trigger a TOC interrupt. If you say Y here a
-	  handler will be installed which will either show a backtrace on all
-	  CPUs, or enter a possible configured debugger like kgdb/kdb.
-
-	  Note that with this option enabled, the kernel will use an additional 16KB
-	  per possible CPU as a special stack for the TOC handler.
-
-	  If you don't want to debug the Kernel, say N.
-
 config PARISC_CPU_TOPOLOGY
 	bool "Support cpu topology definition"
 	depends on SMP
@@ -370,7 +356,8 @@ config NR_CPUS
 	int "Maximum number of CPUs (2-32)"
 	range 2 32
 	depends on SMP
-	default "4"
+	default "4" if 64BIT
+	default "16"
 
 config KEXEC
 	bool "Kexec system call"
arch/parisc/boot/compressed/.gitignore (vendored)
@@ -1,6 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
-firmware.c
-real2.S
 sizes.h
 vmlinux
 vmlinux.lds
arch/parisc/boot/compressed/Makefile
@@ -13,7 +13,6 @@ OBJECTS := head.o real2.o firmware.o misc.o piggy.o
 targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
 targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
 targets += $(OBJECTS) sizes.h
-targets += real2.S firmware.c
 
 KBUILD_CFLAGS := -D__KERNEL__ -O2 -DBOOTLOADER
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
@@ -42,14 +41,7 @@ $(obj)/head.o: $(obj)/sizes.h
 CFLAGS_misc.o += -I$(objtree)/$(obj)
 $(obj)/misc.o: $(obj)/sizes.h
 
-$(obj)/firmware.o: $(obj)/firmware.c
-$(obj)/firmware.c: $(srctree)/arch/$(SRCARCH)/kernel/firmware.c
-	$(call cmd,shipped)
-
 AFLAGS_real2.o += -DBOOTLOADER
-$(obj)/real2.o: $(obj)/real2.S
-$(obj)/real2.S: $(srctree)/arch/$(SRCARCH)/kernel/real2.S
-	$(call cmd,shipped)
 
 CPPFLAGS_vmlinux.lds += -I$(objtree)/$(obj) -DBOOTLOADER
 $(obj)/vmlinux.lds: $(obj)/sizes.h
arch/parisc/boot/compressed/firmware.c (new file)
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "../../kernel/firmware.c"
arch/parisc/boot/compressed/real2.S (new file)
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#include "../../kernel/real2.S"
arch/parisc/include/asm/assembly.h
@@ -158,6 +158,16 @@
 #endif
 	.endm
 
+	/* The depi instruction leaves the most significant 32 bits of the
+	 * target register in an undefined state on PA 2.0 systems. */
+	.macro depi_safe i, p, len, t
+#ifdef CONFIG_64BIT
+	depdi	\i, 32+(\p), \len, \t
+#else
+	depi	\i, \p, \len, \t
+#endif
+	.endm
+
 	/* load 32-bit 'value' into 'reg' compensating for the ldil
 	 * sign-extension when running in wide mode.
 	 * WARNING!! neither 'value' nor 'reg' can be expressions
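The 64-bit branch of depi_safe offsets the bit position by 32 so the deposit lands in the low word of the wide register and the upper half stays well defined. A C model of that guarantee (names are invented for this sketch; sign-extension of the short immediate is ignored), remembering that PA-RISC numbers bits from the MSB:

	#include <stdint.h>

	/* illustration only, not kernel code */
	static uint64_t model_depi_safe(uint64_t t, uint64_t i, unsigned p, unsigned len)
	{
		unsigned shift = 31 - p;		/* == 63 - (32 + p) */
		uint64_t mask = ((1ULL << len) - 1) << shift;

		return (t & ~mask) | ((i << shift) & mask);
	}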
arch/parisc/include/asm/futex.h
@@ -8,39 +8,47 @@
 #include <asm/errno.h>
 
 /* The following has to match the LWS code in syscall.S.  We have
-   sixteen four-word locks. */
+ * 256 four-word locks. We use bits 20-27 of the futex virtual
+ * address for the hash index.
+ */
+
+static inline unsigned long _futex_hash_index(unsigned long ua)
+{
+	return (ua >> 2) & 0x3fc;
+}
 
 static inline void
-_futex_spin_lock(u32 __user *uaddr)
+_futex_spin_lock_irqsave(arch_spinlock_t *s, unsigned long *flags)
 {
-	extern u32 lws_lock_start[];
-	long index = ((long)uaddr & 0x7f8) >> 1;
-	arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
-	preempt_disable();
+	local_irq_save(*flags);
 	arch_spin_lock(s);
 }
 
 static inline void
-_futex_spin_unlock(u32 __user *uaddr)
+_futex_spin_unlock_irqrestore(arch_spinlock_t *s, unsigned long *flags)
 {
-	extern u32 lws_lock_start[];
-	long index = ((long)uaddr & 0x7f8) >> 1;
-	arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
 	arch_spin_unlock(s);
-	preempt_enable();
+	local_irq_restore(*flags);
 }
 
 static inline int
 arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
 {
+	extern u32 lws_lock_start[];
+	unsigned long ua = (unsigned long)uaddr;
+	arch_spinlock_t *s;
+	unsigned long flags;
 	int oldval, ret;
 	u32 tmp;
 
-	ret = -EFAULT;
+	s = (arch_spinlock_t *)&lws_lock_start[_futex_hash_index(ua)];
+	_futex_spin_lock_irqsave(s, &flags);
 
-	_futex_spin_lock(uaddr);
-	if (unlikely(get_user(oldval, uaddr) != 0))
+	/* Return -EFAULT if we encounter a page fault or COW break */
+	if (unlikely(get_user(oldval, uaddr) != 0)) {
+		ret = -EFAULT;
 		goto out_pagefault_enable;
+	}
 
 	ret = 0;
 	tmp = oldval;
@@ -63,13 +71,14 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
 		break;
 	default:
 		ret = -ENOSYS;
+		goto out_pagefault_enable;
 	}
 
-	if (ret == 0 && unlikely(put_user(tmp, uaddr) != 0))
+	if (unlikely(put_user(tmp, uaddr) != 0))
 		ret = -EFAULT;
 
 out_pagefault_enable:
-	_futex_spin_unlock(uaddr);
+	_futex_spin_unlock_irqrestore(s, &flags);
 
 	if (!ret)
 		*oval = oldval;
@@ -81,7 +90,11 @@ static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 			      u32 oldval, u32 newval)
 {
+	extern u32 lws_lock_start[];
+	unsigned long ua = (unsigned long)uaddr;
+	arch_spinlock_t *s;
 	u32 val;
+	unsigned long flags;
 
 	/* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
 	 * our gateway page, and causes no end of trouble...
@@ -94,23 +107,25 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 
 	/* HPPA has no cmpxchg in hardware and therefore the
 	 * best we can do here is use an array of locks. The
-	 * lock selected is based on a hash of the userspace
-	 * address. This should scale to a couple of CPUs.
+	 * lock selected is based on a hash of the virtual
+	 * address of the futex. This should scale to a couple
+	 * of CPUs.
 	 */
 
-	_futex_spin_lock(uaddr);
+	s = (arch_spinlock_t *)&lws_lock_start[_futex_hash_index(ua)];
+	_futex_spin_lock_irqsave(s, &flags);
 	if (unlikely(get_user(val, uaddr) != 0)) {
-		_futex_spin_unlock(uaddr);
+		_futex_spin_unlock_irqrestore(s, &flags);
 		return -EFAULT;
 	}
 
 	if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
-		_futex_spin_unlock(uaddr);
+		_futex_spin_unlock_irqrestore(s, &flags);
 		return -EFAULT;
 	}
 
 	*uval = val;
-	_futex_spin_unlock(uaddr);
+	_futex_spin_unlock_irqrestore(s, &flags);
 
 	return 0;
 }
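A small user-space illustration of the new hash (the helper and main() are ours): bits 4-11 of the futex address, which are bits 20-27 in PA-RISC's MSB-first numbering, select one of 256 word offsets, each a multiple of four, so the locks sit 16 bytes apart in lws_lock_start[].

	#include <stdio.h>

	static unsigned long futex_hash_index(unsigned long ua)
	{
		return (ua >> 2) & 0x3fc;	/* same arithmetic as _futex_hash_index() */
	}

	int main(void)
	{
		unsigned long addrs[] = { 0x40001000UL, 0x40001010UL, 0x400017f0UL };

		for (int i = 0; i < 3; i++)
			printf("%#lx -> word offset %lu (lock %lu)\n", addrs[i],
			       futex_hash_index(addrs[i]), futex_hash_index(addrs[i]) / 4);
		return 0;
	}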
arch/parisc/include/asm/io.h
@@ -273,9 +273,9 @@ static inline int inl(unsigned long addr)
 	return -1;
 }
 
-#define outb(x, y)	BUG()
-#define outw(x, y)	BUG()
-#define outl(x, y)	BUG()
+#define outb(x, y)	({(void)(x); (void)(y); BUG(); 0;})
+#define outw(x, y)	({(void)(x); (void)(y); BUG(); 0;})
+#define outl(x, y)	({(void)(x); (void)(y); BUG(); 0;})
 #endif
 
 /*
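A stand-alone sketch of why the new shape helps (fake_outb() and the variable names are invented here): the GCC statement expression evaluates both arguments, so callers whose port-I/O path is compiled out presumably no longer trip unused-variable build errors, and the macro still yields a value usable in expression context.

	#define fake_outb(x, y)	({ (void)(x); (void)(y); 0; })

	int main(void)
	{
		int value = 0x12, port = 0x60;

		return fake_outb(value, port);	/* both arguments count as used */
	}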
arch/parisc/include/asm/special_insns.h
@@ -5,9 +5,11 @@
 #define lpa(va)	({				\
 	unsigned long pa;			\
 	__asm__ __volatile__(			\
-		"copy %%r0,%0\n\t"		\
-		"lpa %%r0(%1),%0"		\
-		: "=r" (pa)			\
+		"copy %%r0,%0\n"		\
+		"8:\tlpa %%r0(%1),%0\n"		\
+		"9:\n"				\
+		ASM_EXCEPTIONTABLE_ENTRY(8b, 9b) \
+		: "=&r" (pa)			\
 		: "r" (va)			\
 		: "memory"			\
 	);					\
@@ -17,9 +19,11 @@
 #define lpa_user(va)	({			\
 	unsigned long pa;			\
 	__asm__ __volatile__(			\
-		"copy %%r0,%0\n\t"		\
-		"lpa %%r0(%%sr3,%1),%0"		\
-		: "=r" (pa)			\
+		"copy %%r0,%0\n"		\
+		"8:\tlpa %%r0(%%sr3,%1),%0\n"	\
+		"9:\n"				\
+		ASM_EXCEPTIONTABLE_ENTRY(8b, 9b) \
+		: "=&r" (pa)			\
 		: "r" (va)			\
 		: "memory"			\
 	);					\
arch/parisc/include/asm/uaccess.h
@@ -53,15 +53,18 @@ struct exception_table_entry {
 /*
  * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
  * (with lowest bit set) for which the fault handler in fixup_exception() will
- * load -EFAULT into %r8 for a read or write fault, and zeroes the target
+ * load -EFAULT into %r29 for a read or write fault, and zeroes the target
  * register in case of a read fault in get_user().
  */
+#define ASM_EXCEPTIONTABLE_REG	29
+#define ASM_EXCEPTIONTABLE_VAR(__variable)		\
+	register long __variable __asm__ ("r29") = 0
 #define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
 	ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
 
 #define __get_user_internal(sr, val, ptr)		\
 ({							\
-	register long __gu_err __asm__ ("r8") = 0;	\
+	ASM_EXCEPTIONTABLE_VAR(__gu_err);		\
 							\
 	switch (sizeof(*(ptr))) {			\
 	case 1: __get_user_asm(sr, val, "ldb", ptr); break; \
@@ -131,7 +134,7 @@ struct exception_table_entry {
 
 #define __put_user_internal(sr, x, ptr)			\
 ({							\
-	register long __pu_err __asm__ ("r8") = 0;	\
+	ASM_EXCEPTIONTABLE_VAR(__pu_err);		\
 	__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \
 							\
 	switch (sizeof(*(ptr))) {			\
@@ -168,7 +171,8 @@ struct exception_table_entry {
  * gcc knows about, so there are no aliasing issues. These macros must
  * also be aware that fixups are executed in the context of the fault,
  * and any registers used there must be listed as clobbers.
- * r8 is already listed as err.
+ * The register holding the possible EFAULT error (ASM_EXCEPTIONTABLE_REG)
+ * is already listed as input and output register.
  */
 
 #define __put_user_asm(sr, stx, x, ptr)			\
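The switch of the error register from %r8 to %r29 is invisible to C callers; for reference, a typical use is still just (the helper name is invented here):

	static int read_user_word(u32 __user *uptr, u32 *out)
	{
		u32 val;

		if (get_user(val, uptr))	/* returns -EFAULT and zeroes val on a fault */
			return -EFAULT;

		*out = val;
		return 0;
	}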
arch/parisc/include/asm/pdc.h
@@ -403,7 +403,7 @@ struct zeropage {
 	int	vec_pad1[6];
 
 	/* [0x040] reserved processor dependent */
-	int	pad0[112];
+	int	pad0[112];		/* in QEMU pad0[0] holds "SeaBIOS\0" */
 
 	/* [0x200] reserved */
 	int	pad1[84];
@@ -691,6 +691,22 @@ struct pdc_hpmc_pim_20 {	/* PDC_PIM */
 	unsigned long long fr[32];
 };
 
+struct pim_cpu_state_cf {
+	union {
+	unsigned int
+		iqv : 1,	/* IIA queue Valid */
+		iqf : 1,	/* IIA queue Failure */
+		ipv : 1,	/* IPRs Valid */
+		grv : 1,	/* GRs Valid */
+		crv : 1,	/* CRs Valid */
+		srv : 1,	/* SRs Valid */
+		trv : 1,	/* CR24 through CR31 valid */
+		pad : 24,	/* reserved */
+		td : 1;		/* TOC did not cause any damage to the system state */
+	unsigned int val;
+	};
+};
+
 struct pdc_toc_pim_11 {
 	unsigned int gr[32];
 	unsigned int cr[32];
@@ -698,8 +714,7 @@ struct pdc_toc_pim_11 {
 	unsigned int iasq_back;
 	unsigned int iaoq_back;
 	unsigned int check_type;
-	unsigned int hversion;
-	unsigned int cpu_state;
+	struct pim_cpu_state_cf cpu_state;
 };
 
 struct pdc_toc_pim_20 {
@@ -709,8 +724,7 @@ struct pdc_toc_pim_20 {
 	unsigned long long iasq_back;
 	unsigned long long iaoq_back;
 	unsigned int check_type;
-	unsigned int hversion;
-	unsigned int cpu_state;
+	struct pim_cpu_state_cf cpu_state;
 };
 
 #endif /* !defined(__ASSEMBLY__) */
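A sketch of how a consumer of the corrected layout might test the validity bits (the function name and message are invented here, not taken from the kernel sources): the anonymous union lets callers check individual flags or grab the raw word.

	static void report_toc_state(const struct pdc_toc_pim_20 *pim)
	{
		if (!pim->cpu_state.iqv)
			return;		/* IIA queue not valid, nothing useful captured */

		pr_info("TOC: cpu_state %#x, GRs %svalid, system state %sdamaged\n",
			pim->cpu_state.val,
			pim->cpu_state.grv ? "" : "in",
			pim->cpu_state.td ? "un" : "");
	}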
arch/parisc/kernel/Makefile
@@ -10,7 +10,7 @@ obj-y		:= cache.o pacache.o setup.o pdt.o traps.o time.o irq.o \
 		   ptrace.o hardware.o inventory.o drivers.o alternative.o \
 		   signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \
 		   process.o processor.o pdc_cons.o pdc_chassis.o unwind.o \
-		   patch.o
+		   patch.o toc.o toc_asm.o
 
 ifdef CONFIG_FUNCTION_TRACER
 # Do not profile debug and lowlevel utilities
@@ -39,4 +39,3 @@ obj-$(CONFIG_KGDB)		+= kgdb.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o
 obj-$(CONFIG_KEXEC_CORE)	+= kexec.o relocate_kernel.o
 obj-$(CONFIG_KEXEC_FILE)	+= kexec_file.o
-obj-$(CONFIG_TOC)		+= toc.o toc_asm.o
arch/parisc/kernel/asm-offsets.c
@@ -36,7 +36,11 @@
 int main(void)
 {
 	DEFINE(TASK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
+#ifdef CONFIG_SMP
+	DEFINE(TASK_TI_CPU, offsetof(struct task_struct, thread_info.cpu));
+#endif
 	DEFINE(TASK_STACK, offsetof(struct task_struct, stack));
+	DEFINE(TASK_PAGEFAULT_DISABLED, offsetof(struct task_struct, pagefault_disabled));
 	BLANK();
 	DEFINE(TASK_REGS, offsetof(struct task_struct, thread.regs));
 	DEFINE(TASK_PT_PSW, offsetof(struct task_struct, thread.regs.gr[ 0]));
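The new TASK_PAGEFAULT_DISABLED constant reaches assembly through the usual asm-offsets trick: DEFINE() comes from include/linux/kbuild.h and is roughly the macro below; the build turns the emitted markers into asm-offsets.h, which is what the lws_pagefault_disable/enable macros in syscall.S rely on.

	#define DEFINE(sym, val) \
		asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))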
arch/parisc/kernel/hpmc.S
@@ -43,10 +43,8 @@
  * IODC requires 7K byte stack.  That leaves 1K byte for os_hpmc.
  */
 
-	__PAGE_ALIGNED_BSS
-	.align 4096
-hpmc_stack:
-	.block 16384
+	.import toc_stack,data
+#define hpmc_stack toc_stack	/* re-use the TOC stack */
 
 #define HPMC_IODC_BUF_SIZE 0x8000
 
arch/parisc/kernel/kgdb.c
@@ -3,6 +3,7 @@
  * PA-RISC KGDB support
  *
  * Copyright (c) 2019 Sven Schnelle <svens@stackframe.org>
+ * Copyright (c) 2022 Helge Deller <deller@gmx.de>
  *
  */
 
@@ -207,3 +208,23 @@ int kgdb_arch_handle_exception(int trap, int signo,
 	}
 	return -1;
 }
+
+/* KGDB console driver which uses PDC to read chars from keyboard */
+
+static void kgdb_pdc_write_char(u8 chr)
+{
+	/* no need to print char. kgdb will do it. */
+}
+
+static struct kgdb_io kgdb_pdc_io_ops = {
+	.name		= "kgdb_pdc",
+	.read_char	= pdc_iodc_getc,
+	.write_char	= kgdb_pdc_write_char,
+};
+
+static int __init kgdb_pdc_init(void)
+{
+	kgdb_register_io_module(&kgdb_pdc_io_ops);
+	return 0;
+}
+early_initcall(kgdb_pdc_init);
arch/parisc/kernel/syscall.S
@@ -50,6 +50,22 @@ registers).
 
 	.level	PA_ASM_LEVEL
 
+	.macro	lws_pagefault_disable reg1,reg2
+	mfctl	%cr30, \reg2
+	ldo	TASK_PAGEFAULT_DISABLED(\reg2), \reg2
+	ldw	0(%sr2,\reg2), \reg1
+	ldo	1(\reg1), \reg1
+	stw	\reg1, 0(%sr2,\reg2)
+	.endm
+
+	.macro	lws_pagefault_enable reg1,reg2
+	mfctl	%cr30, \reg2
+	ldo	TASK_PAGEFAULT_DISABLED(\reg2), \reg2
+	ldw	0(%sr2,\reg2), \reg1
+	ldo	-1(\reg1), \reg1
+	stw	\reg1, 0(%sr2,\reg2)
+	.endm
+
 	.text
 
 	.import syscall_exit,code
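These two macros are the assembly counterpart of the generic pagefault_disable()/pagefault_enable() helpers: they bump the per-task counter whose offset is the TASK_PAGEFAULT_DISABLED constant added in asm-offsets.c. Roughly, in C (the _model names are only for this sketch, compiler barriers omitted):

	static inline void lws_pagefault_disable_model(void)
	{
		current->pagefault_disabled++;	/* the ldw / ldo 1 / stw sequence via %cr30 */
	}

	static inline void lws_pagefault_enable_model(void)
	{
		current->pagefault_disabled--;	/* the ldo -1 path */
	}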
@@ -74,7 +90,7 @@ ENTRY(linux_gateway_page)
 	/* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
 	/* Light-weight-syscall entry must always be located at 0xb0 */
 	/* WARNING: Keep this number updated with table size changes */
-#define __NR_lws_entries (3)
+#define __NR_lws_entries (5)
 
 lws_entry:
 	gate	lws_start, %r0		/* increase privilege */
@@ -490,8 +506,34 @@ lws_start:
 	/* Jump to lws, lws table pointers already relocated */
 	be,n	0(%sr2,%r21)
 
+lws_exit_noerror:
+	lws_pagefault_enable	%r1,%r21
+	stw,ma	%r20, 0(%sr2,%r20)
+	ssm	PSW_SM_I, %r0
+	b	lws_exit
+	copy	%r0, %r21
+
+lws_wouldblock:
+	ssm	PSW_SM_I, %r0
+	ldo	2(%r0), %r28
+	b	lws_exit
+	ldo	-EAGAIN(%r0), %r21
+
+lws_pagefault:
+	lws_pagefault_enable	%r1,%r21
+	stw,ma	%r20, 0(%sr2,%r20)
+	ssm	PSW_SM_I, %r0
+	ldo	3(%r0),%r28
+	b	lws_exit
+	ldo	-EAGAIN(%r0),%r21
+
+lws_fault:
+	ldo	1(%r0),%r28
+	b	lws_exit
+	ldo	-EFAULT(%r0),%r21
+
 lws_exit_nosys:
-	ldo	-ENOSYS(%r0),%r21	/* set errno */
+	ldo	-ENOSYS(%r0),%r21
 	/* Fall through: Return to userspace */
 
 lws_exit:
@@ -518,27 +560,19 @@ lws_exit:
 	%r28 - Return prev through this register.
 	%r21 - Kernel error code
 
-	If debugging is DISabled:
-
-	%r21 has the following meanings:
+	%r21 returns the following error codes:
 
 	EAGAIN - CAS is busy, ldcw failed, try again.
 	EFAULT - Read or write failed.
 
-	If debugging is enabled:
-
-	EDEADLOCK - CAS called recursively.
-	EAGAIN && r28 == 1 - CAS is busy. Lock contended.
-	EAGAIN && r28 == 2 - CAS is busy. ldcw failed.
-	EFAULT - Read or write failed.
+	If EAGAIN is returned, %r28 indicates the busy reason:
+	r28 == 1 - CAS is busy. lock contended.
+	r28 == 2 - CAS is busy. ldcw failed.
+	r28 == 3 - CAS is busy. page fault.
 
 	Scratch: r20, r28, r1
 
 	****************************************************/
 
-	/* Do not enable LWS debugging */
-#define ENABLE_LWS_DEBUG 0
-
 	/* ELF64 Process entry path */
 lws_compare_and_swap64:
 #ifdef CONFIG_64BIT
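Seen from userspace, the contract documented above boils down to a retry loop. A hedged sketch (the __parisc_lws_cas32() wrapper is hypothetical; the real entry is a branch to the gateway page at 0xb0 with the arguments in %r26/%r25/%r24 and the result in %r28/%r21, as described above):

	static unsigned int cas32(unsigned int *addr, unsigned int oldval,
				  unsigned int newval)
	{
		long err;
		unsigned int prev;

		do {
			/* hypothetical wrapper around the 0xb0 gateway entry */
			err = __parisc_lws_cas32(addr, oldval, newval, &prev);
		} while (err == -EAGAIN);	/* lock contended, ldcw failed, or page fault */

		return prev;			/* err == -EFAULT would mean addr was not accessible */
	}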
@ -551,59 +585,45 @@ lws_compare_and_swap64:
|
|||||||
b,n lws_exit_nosys
|
b,n lws_exit_nosys
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* ELF32 Process entry path */
|
/* ELF32/ELF64 Process entry path */
|
||||||
lws_compare_and_swap32:
|
lws_compare_and_swap32:
|
||||||
#ifdef CONFIG_64BIT
|
#ifdef CONFIG_64BIT
|
||||||
/* Clip all the input registers */
|
/* Wide mode user process? */
|
||||||
|
bb,<,n %sp, 31, lws_compare_and_swap
|
||||||
|
|
||||||
|
/* Clip all the input registers for 32-bit processes */
|
||||||
depdi 0, 31, 32, %r26
|
depdi 0, 31, 32, %r26
|
||||||
depdi 0, 31, 32, %r25
|
depdi 0, 31, 32, %r25
|
||||||
depdi 0, 31, 32, %r24
|
depdi 0, 31, 32, %r24
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
lws_compare_and_swap:
|
lws_compare_and_swap:
|
||||||
|
/* Trigger memory reference interruptions without writing to memory */
|
||||||
|
1: ldw 0(%r26), %r28
|
||||||
|
2: stbys,e %r0, 0(%r26)
|
||||||
|
|
||||||
|
/* Calculate 8-bit hash index from virtual address */
|
||||||
|
extru_safe %r26, 27, 8, %r20
|
||||||
|
|
||||||
/* Load start of lock table */
|
/* Load start of lock table */
|
||||||
ldil L%lws_lock_start, %r20
|
ldil L%lws_lock_start, %r28
|
||||||
ldo R%lws_lock_start(%r20), %r28
|
ldo R%lws_lock_start(%r28), %r28
|
||||||
|
|
||||||
/* Extract eight bits from r26 and hash lock (Bits 3-11) */
|
/* Find lock to use, the hash index is one of 0 to
|
||||||
extru_safe %r26, 28, 8, %r20
|
255, multiplied by 16 (keep it 16-byte aligned)
|
||||||
|
|
||||||
/* Find lock to use, the hash is either one of 0 to
|
|
||||||
15, multiplied by 16 (keep it 16-byte aligned)
|
|
||||||
and add to the lock table offset. */
|
and add to the lock table offset. */
|
||||||
shlw %r20, 4, %r20
|
shlw %r20, 4, %r20
|
||||||
add %r20, %r28, %r20
|
add %r20, %r28, %r20
|
||||||
|
|
||||||
# if ENABLE_LWS_DEBUG
|
rsm PSW_SM_I, %r0 /* Disable interrupts */
|
||||||
/*
|
|
||||||
DEBUG, check for deadlock!
|
|
||||||
If the thread register values are the same
|
|
||||||
then we were the one that locked it last and
|
|
||||||
this is a recurisve call that will deadlock.
|
|
||||||
We *must* giveup this call and fail.
|
|
||||||
*/
|
|
||||||
ldw 4(%sr2,%r20), %r28 /* Load thread register */
|
|
||||||
/* WARNING: If cr27 cycles to the same value we have problems */
|
|
||||||
mfctl %cr27, %r21 /* Get current thread register */
|
|
||||||
cmpb,<>,n %r21, %r28, cas_lock /* Called recursive? */
|
|
||||||
b lws_exit /* Return error! */
|
|
||||||
ldo -EDEADLOCK(%r0), %r21
|
|
||||||
cas_lock:
|
|
||||||
cmpb,=,n %r0, %r28, cas_nocontend /* Is nobody using it? */
|
|
||||||
ldo 1(%r0), %r28 /* 1st case */
|
|
||||||
b lws_exit /* Contended... */
|
|
||||||
ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
|
|
||||||
cas_nocontend:
|
|
||||||
# endif
|
|
||||||
/* ENABLE_LWS_DEBUG */
|
|
||||||
|
|
||||||
/* COW breaks can cause contention on UP systems */
|
/* Try to acquire the lock */
|
||||||
LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */
|
LDCW 0(%sr2,%r20), %r28
|
||||||
cmpb,<>,n %r0, %r28, cas_action /* Did we get it? */
|
comclr,<> %r0, %r28, %r0
|
||||||
cas_wouldblock:
|
b,n lws_wouldblock
|
||||||
ldo 2(%r0), %r28 /* 2nd case */
|
|
||||||
b lws_exit /* Contended... */
|
/* Disable page faults to prevent sleeping in critical region */
|
||||||
ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
|
lws_pagefault_disable %r21,%r28
|
||||||
|
|
||||||
/*
|
/*
|
||||||
prev = *addr;
|
prev = *addr;
|
||||||
@ -613,59 +633,35 @@ cas_wouldblock:
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
/* NOTES:
|
/* NOTES:
|
||||||
This all works becuse intr_do_signal
|
This all works because intr_do_signal
|
||||||
and schedule both check the return iasq
|
and schedule both check the return iasq
|
||||||
and see that we are on the kernel page
|
and see that we are on the kernel page
|
||||||
so this process is never scheduled off
|
so this process is never scheduled off
|
||||||
or is ever sent any signal of any sort,
|
or is ever sent any signal of any sort,
|
||||||
thus it is wholly atomic from usrspaces
|
thus it is wholly atomic from usrspace's
|
||||||
perspective
|
perspective
|
||||||
*/
|
*/
|
||||||
cas_action:
|
|
||||||
#if defined CONFIG_SMP && ENABLE_LWS_DEBUG
|
|
||||||
/* DEBUG */
|
|
||||||
mfctl %cr27, %r1
|
|
||||||
stw %r1, 4(%sr2,%r20)
|
|
||||||
#endif
|
|
||||||
/* The load and store could fail */
|
/* The load and store could fail */
|
||||||
1: ldw 0(%r26), %r28
|
3: ldw 0(%r26), %r28
|
||||||
sub,<> %r28, %r25, %r0
|
sub,<> %r28, %r25, %r0
|
||||||
2: stw %r24, 0(%r26)
|
4: stw %r24, 0(%r26)
|
||||||
/* Free lock */
|
b,n lws_exit_noerror
|
||||||
stw,ma %r20, 0(%sr2,%r20)
|
|
||||||
#if ENABLE_LWS_DEBUG
|
|
||||||
/* Clear thread register indicator */
|
|
||||||
stw %r0, 4(%sr2,%r20)
|
|
||||||
#endif
|
|
||||||
/* Return to userspace, set no error */
|
|
||||||
b lws_exit
|
|
||||||
copy %r0, %r21
|
|
||||||
|
|
||||||
3:
|
/* A fault occurred on load or stbys,e store */
|
||||||
/* Error occurred on load or store */
|
5: b,n lws_fault
|
||||||
/* Free lock */
|
ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 5b-linux_gateway_page)
|
||||||
stw,ma %r20, 0(%sr2,%r20)
|
ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 5b-linux_gateway_page)
|
||||||
#if ENABLE_LWS_DEBUG
|
|
||||||
stw %r0, 4(%sr2,%r20)
|
|
||||||
#endif
|
|
||||||
b lws_exit
|
|
||||||
ldo -EFAULT(%r0),%r21 /* set errno */
|
|
||||||
nop
|
|
||||||
nop
|
|
||||||
nop
|
|
||||||
nop
|
|
||||||
|
|
||||||
/* Two exception table entries, one for the load,
|
/* A page fault occurred in critical region */
|
||||||
the other for the store. Either return -EFAULT.
|
6: b,n lws_pagefault
|
||||||
Each of the entries must be relocated. */
|
ASM_EXCEPTIONTABLE_ENTRY(3b-linux_gateway_page, 6b-linux_gateway_page)
|
||||||
ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 3b-linux_gateway_page)
|
ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 6b-linux_gateway_page)
|
||||||
ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 3b-linux_gateway_page)
|
|
||||||
|
|
||||||
|
|
||||||
/***************************************************
|
/***************************************************
|
||||||
New CAS implementation which uses pointers and variable size
|
New CAS implementation which uses pointers and variable size
|
||||||
information. The value pointed by old and new MUST NOT change
|
information. The value pointed by old and new MUST NOT change
|
||||||
while performing CAS. The lock only protect the value at %r26.
|
while performing CAS. The lock only protects the value at %r26.
|
||||||
|
|
||||||
%r26 - Address to examine
|
%r26 - Address to examine
|
||||||
%r25 - Pointer to the value to check (old)
|
%r25 - Pointer to the value to check (old)
|
||||||
@ -674,25 +670,32 @@ cas_action:
|
|||||||
%r28 - Return non-zero on failure
|
%r28 - Return non-zero on failure
|
||||||
%r21 - Kernel error code
|
%r21 - Kernel error code
|
||||||
|
|
||||||
%r21 has the following meanings:
|
%r21 returns the following error codes:
|
||||||
|
|
||||||
EAGAIN - CAS is busy, ldcw failed, try again.
|
EAGAIN - CAS is busy, ldcw failed, try again.
|
||||||
EFAULT - Read or write failed.
|
EFAULT - Read or write failed.
|
||||||
|
|
||||||
|
If EAGAIN is returned, %r28 indicates the busy reason:
|
||||||
|
r28 == 1 - CAS is busy. lock contended.
|
||||||
|
r28 == 2 - CAS is busy. ldcw failed.
|
||||||
|
r28 == 3 - CAS is busy. page fault.
|
||||||
|
|
||||||
Scratch: r20, r22, r28, r29, r1, fr4 (32bit for 64bit CAS only)
|
Scratch: r20, r22, r28, r29, r1, fr4 (32bit for 64bit CAS only)
|
||||||
|
|
||||||
****************************************************/
|
****************************************************/
|
||||||
|
|
||||||
/* ELF32 Process entry path */
|
|
||||||
lws_compare_and_swap_2:
|
lws_compare_and_swap_2:
|
||||||
#ifdef CONFIG_64BIT
|
#ifdef CONFIG_64BIT
|
||||||
/* Clip the input registers. We don't need to clip %r23 as we
|
/* Wide mode user process? */
|
||||||
only use it for word operations */
|
bb,<,n %sp, 31, cas2_begin
|
||||||
|
|
||||||
|
/* Clip the input registers for 32-bit processes. We don't
|
||||||
|
need to clip %r23 as we only use it for word operations */
|
||||||
depdi 0, 31, 32, %r26
|
depdi 0, 31, 32, %r26
|
||||||
depdi 0, 31, 32, %r25
|
depdi 0, 31, 32, %r25
|
||||||
depdi 0, 31, 32, %r24
|
depdi 0, 31, 32, %r24
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
cas2_begin:
|
||||||
/* Check the validity of the size pointer */
|
/* Check the validity of the size pointer */
|
||||||
subi,>>= 3, %r23, %r0
|
subi,>>= 3, %r23, %r0
|
||||||
b,n lws_exit_nosys
|
b,n lws_exit_nosys
|
||||||
@ -703,69 +706,77 @@ lws_compare_and_swap_2:
|
|||||||
blr %r29, %r0
|
blr %r29, %r0
|
||||||
nop
|
nop
|
||||||
|
|
||||||
/* 8bit load */
|
/* 8-bit load */
|
||||||
4: ldb 0(%r25), %r25
|
1: ldb 0(%r25), %r25
|
||||||
b cas2_lock_start
|
b cas2_lock_start
|
||||||
5: ldb 0(%r24), %r24
|
2: ldb 0(%r24), %r24
|
||||||
nop
|
nop
|
||||||
nop
|
nop
|
||||||
nop
|
nop
|
||||||
nop
|
nop
|
||||||
nop
|
nop
|
||||||
|
|
||||||
/* 16bit load */
|
/* 16-bit load */
|
||||||
6: ldh 0(%r25), %r25
|
3: ldh 0(%r25), %r25
|
||||||
b cas2_lock_start
|
b cas2_lock_start
|
||||||
7: ldh 0(%r24), %r24
|
4: ldh 0(%r24), %r24
|
||||||
nop
|
nop
|
||||||
nop
|
nop
|
||||||
nop
|
nop
|
||||||
nop
|
nop
|
||||||
nop
|
nop
|
||||||
|
|
||||||
/* 32bit load */
|
/* 32-bit load */
|
||||||
8: ldw 0(%r25), %r25
|
5: ldw 0(%r25), %r25
|
||||||
b cas2_lock_start
|
b cas2_lock_start
|
||||||
9: ldw 0(%r24), %r24
|
6: ldw 0(%r24), %r24
|
||||||
nop
|
nop
|
||||||
nop
|
nop
|
||||||
nop
|
nop
|
||||||
nop
|
nop
|
||||||
nop
|
nop
|
||||||
|
|
||||||
/* 64bit load */
|
/* 64-bit load */
|
||||||
#ifdef CONFIG_64BIT
|
#ifdef CONFIG_64BIT
|
||||||
10: ldd 0(%r25), %r25
|
7: ldd 0(%r25), %r25
|
||||||
11: ldd 0(%r24), %r24
|
8: ldd 0(%r24), %r24
|
||||||
#else
|
#else
|
||||||
/* Load old value into r22/r23 - high/low */
|
/* Load old value into r22/r23 - high/low */
|
||||||
10: ldw 0(%r25), %r22
|
7: ldw 0(%r25), %r22
|
||||||
11: ldw 4(%r25), %r23
|
8: ldw 4(%r25), %r23
|
||||||
/* Load new value into fr4 for atomic store later */
|
/* Load new value into fr4 for atomic store later */
|
||||||
12: flddx 0(%r24), %fr4
|
9: flddx 0(%r24), %fr4
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
cas2_lock_start:
|
cas2_lock_start:
|
||||||
|
/* Trigger memory reference interruptions without writing to memory */
|
||||||
|
copy %r26, %r28
|
||||||
|
depi_safe 0, 31, 2, %r28
|
||||||
|
10: ldw 0(%r28), %r1
|
||||||
|
11: stbys,e %r0, 0(%r28)
|
||||||
|
|
||||||
|
/* Calculate 8-bit hash index from virtual address */
|
||||||
|
extru_safe %r26, 27, 8, %r20
|
||||||
|
|
||||||
/* Load start of lock table */
|
/* Load start of lock table */
|
||||||
ldil L%lws_lock_start, %r20
|
ldil L%lws_lock_start, %r28
|
||||||
ldo R%lws_lock_start(%r20), %r28
|
ldo R%lws_lock_start(%r28), %r28
|
||||||
|
|
||||||
/* Extract eight bits from r26 and hash lock (Bits 3-11) */
|
/* Find lock to use, the hash index is one of 0 to
|
||||||
extru_safe %r26, 28, 8, %r20
|
255, multiplied by 16 (keep it 16-byte aligned)
|
||||||
|
|
||||||
/* Find lock to use, the hash is either one of 0 to
|
|
||||||
15, multiplied by 16 (keep it 16-byte aligned)
|
|
||||||
and add to the lock table offset. */
|
and add to the lock table offset. */
|
||||||
shlw %r20, 4, %r20
|
shlw %r20, 4, %r20
|
||||||
add %r20, %r28, %r20
|
add %r20, %r28, %r20
|
||||||
|
|
||||||
/* COW breaks can cause contention on UP systems */
|
rsm PSW_SM_I, %r0 /* Disable interrupts */
|
||||||
LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */
|
|
||||||
cmpb,<>,n %r0, %r28, cas2_action /* Did we get it? */
|
/* Try to acquire the lock */
|
||||||
cas2_wouldblock:
|
LDCW 0(%sr2,%r20), %r28
|
||||||
ldo 2(%r0), %r28 /* 2nd case */
|
comclr,<> %r0, %r28, %r0
|
||||||
b lws_exit /* Contended... */
|
b,n lws_wouldblock
|
||||||
ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
|
|
||||||
|
/* Disable page faults to prevent sleeping in critical region */
|
||||||
|
lws_pagefault_disable %r21,%r28
|
||||||
|
|
||||||
/*
|
/*
|
||||||
prev = *addr;
|
prev = *addr;
|
||||||
@ -775,112 +786,493 @@ cas2_wouldblock:
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
/* NOTES:
|
/* NOTES:
|
||||||
This all works becuse intr_do_signal
|
This all works because intr_do_signal
|
||||||
and schedule both check the return iasq
|
and schedule both check the return iasq
|
||||||
and see that we are on the kernel page
|
and see that we are on the kernel page
|
||||||
so this process is never scheduled off
|
so this process is never scheduled off
|
||||||
or is ever sent any signal of any sort,
|
or is ever sent any signal of any sort,
|
||||||
thus it is wholly atomic from usrspaces
|
thus it is wholly atomic from usrspace's
|
||||||
perspective
|
perspective
|
||||||
*/
|
*/
|
||||||
cas2_action:
|
|
||||||
/* Jump to the correct function */
|
/* Jump to the correct function */
|
||||||
blr %r29, %r0
|
blr %r29, %r0
|
||||||
/* Set %r28 as non-zero for now */
|
/* Set %r28 as non-zero for now */
|
||||||
ldo 1(%r0),%r28
|
ldo 1(%r0),%r28
|
||||||
|
|
||||||
/* 8bit CAS */
|
/* 8-bit CAS */
|
||||||
13: ldb 0(%r26), %r29
|
12: ldb 0(%r26), %r29
|
||||||
sub,= %r29, %r25, %r0
|
sub,= %r29, %r25, %r0
|
||||||
b,n cas2_end
|
b,n lws_exit_noerror
|
||||||
14: stb %r24, 0(%r26)
|
13: stb %r24, 0(%r26)
|
||||||
b cas2_end
|
b lws_exit_noerror
|
||||||
copy %r0, %r28
|
copy %r0, %r28
|
||||||
nop
|
nop
|
||||||
nop
|
nop
|
||||||
|
|
||||||
/* 16bit CAS */
|
/* 16-bit CAS */
|
||||||
15: ldh 0(%r26), %r29
|
14: ldh 0(%r26), %r29
|
||||||
sub,= %r29, %r25, %r0
|
sub,= %r29, %r25, %r0
|
||||||
b,n cas2_end
|
b,n lws_exit_noerror
|
||||||
16: sth %r24, 0(%r26)
|
15: sth %r24, 0(%r26)
|
||||||
b cas2_end
|
b lws_exit_noerror
|
||||||
copy %r0, %r28
|
copy %r0, %r28
|
||||||
nop
|
nop
|
||||||
nop
|
nop
|
||||||
|
|
||||||
/* 32bit CAS */
|
/* 32-bit CAS */
|
||||||
17: ldw 0(%r26), %r29
|
16: ldw 0(%r26), %r29
|
||||||
sub,= %r29, %r25, %r0
|
sub,= %r29, %r25, %r0
|
||||||
b,n cas2_end
|
b,n lws_exit_noerror
|
||||||
18: stw %r24, 0(%r26)
|
17: stw %r24, 0(%r26)
|
||||||
b cas2_end
|
b lws_exit_noerror
|
||||||
copy %r0, %r28
|
copy %r0, %r28
|
||||||
nop
|
nop
|
||||||
nop
|
nop
|
||||||
|
|
||||||
/* 64bit CAS */
|
/* 64-bit CAS */
|
||||||
#ifdef CONFIG_64BIT
|
#ifdef CONFIG_64BIT
|
||||||
19: ldd 0(%r26), %r29
|
18: ldd 0(%r26), %r29
|
||||||
sub,*= %r29, %r25, %r0
|
sub,*= %r29, %r25, %r0
|
||||||
b,n cas2_end
|
b,n lws_exit_noerror
|
||||||
20: std %r24, 0(%r26)
|
19: std %r24, 0(%r26)
|
||||||
copy %r0, %r28
|
copy %r0, %r28
|
||||||
#else
|
#else
|
||||||
/* Compare first word */
|
/* Compare first word */
|
||||||
19: ldw 0(%r26), %r29
|
18: ldw 0(%r26), %r29
|
||||||
sub,= %r29, %r22, %r0
|
sub,= %r29, %r22, %r0
|
||||||
b,n cas2_end
|
b,n lws_exit_noerror
|
||||||
/* Compare second word */
|
/* Compare second word */
|
||||||
20: ldw 4(%r26), %r29
|
19: ldw 4(%r26), %r29
|
||||||
sub,= %r29, %r23, %r0
|
sub,= %r29, %r23, %r0
|
||||||
b,n cas2_end
|
b,n lws_exit_noerror
|
||||||
/* Perform the store */
|
/* Perform the store */
|
||||||
21: fstdx %fr4, 0(%r26)
|
20: fstdx %fr4, 0(%r26)
|
||||||
copy %r0, %r28
|
copy %r0, %r28
|
||||||
#endif
|
#endif
|
||||||
|
b lws_exit_noerror
|
||||||
|
copy %r0, %r28
|
||||||
|
|
||||||
cas2_end:
|
/* A fault occurred on load or stbys,e store */
|
||||||
/* Free lock */
|
30: b,n lws_fault
|
||||||
stw,ma %r20, 0(%sr2,%r20)
|
ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 30b-linux_gateway_page)
|
||||||
/* Return to userspace, set no error */
|
ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 30b-linux_gateway_page)
|
||||||
b lws_exit
|
ASM_EXCEPTIONTABLE_ENTRY(3b-linux_gateway_page, 30b-linux_gateway_page)
|
||||||
copy %r0, %r21
|
ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 30b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 30b-linux_gateway_page)
|
||||||
22:
|
ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 30b-linux_gateway_page)
|
||||||
/* Error occurred on load or store */
|
ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 30b-linux_gateway_page)
|
||||||
/* Free lock */
|
ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 30b-linux_gateway_page)
|
||||||
stw,ma %r20, 0(%sr2,%r20)
|
|
||||||
ldo 1(%r0),%r28
|
|
||||||
b lws_exit
|
|
||||||
ldo -EFAULT(%r0),%r21 /* set errno */
|
|
||||||
nop
|
|
||||||
nop
|
|
||||||
nop
|
|
||||||
|
|
||||||
/* Exception table entries, for the load and store, return EFAULT.
|
|
||||||
Each of the entries must be relocated. */
|
|
||||||
ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 22b-linux_gateway_page)
|
|
||||||
ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 22b-linux_gateway_page)
|
|
||||||
ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 22b-linux_gateway_page)
|
|
||||||
ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 22b-linux_gateway_page)
|
|
||||||
ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 22b-linux_gateway_page)
|
|
||||||
ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 22b-linux_gateway_page)
|
|
||||||
ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 22b-linux_gateway_page)
|
|
||||||
ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 22b-linux_gateway_page)
|
|
||||||
ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 22b-linux_gateway_page)
|
|
||||||
ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 22b-linux_gateway_page)
|
|
||||||
ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 22b-linux_gateway_page)
|
|
||||||
ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 22b-linux_gateway_page)
|
|
||||||
ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 22b-linux_gateway_page)
|
|
||||||
ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 22b-linux_gateway_page)
|
|
||||||
ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 22b-linux_gateway_page)
|
|
||||||
ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 22b-linux_gateway_page)
|
|
||||||
#ifndef CONFIG_64BIT
|
#ifndef CONFIG_64BIT
|
||||||
ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 22b-linux_gateway_page)
|
ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 30b-linux_gateway_page)
|
||||||
ASM_EXCEPTIONTABLE_ENTRY(21b-linux_gateway_page, 22b-linux_gateway_page)
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 30b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 30b-linux_gateway_page)
|
||||||
|
|
||||||
|
/* A page fault occurred in critical region */
|
||||||
|
31: b,n lws_pagefault
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 31b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 31b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 31b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 31b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 31b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 31b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 31b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 31b-linux_gateway_page)
|
||||||
|
#ifndef CONFIG_64BIT
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 31b-linux_gateway_page)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
|
||||||
|
/***************************************************
|
||||||
|
LWS atomic exchange.
|
||||||
|
|
||||||
|
%r26 - Exchange address
|
||||||
|
%r25 - Size of the variable (0/1/2/3 for 8/16/32/64 bit)
|
||||||
|
%r24 - Address of new value
|
||||||
|
%r23 - Address of old value
|
||||||
|
%r28 - Return non-zero on failure
|
||||||
|
%r21 - Kernel error code
|
||||||
|
|
||||||
|
%r21 returns the following error codes:
|
||||||
|
EAGAIN - CAS is busy, ldcw failed, try again.
|
||||||
|
EFAULT - Read or write failed.
|
||||||
|
|
||||||
|
If EAGAIN is returned, %r28 indicates the busy reason:
|
||||||
|
r28 == 1 - CAS is busy. lock contended.
|
||||||
|
r28 == 2 - CAS is busy. ldcw failed.
|
||||||
|
r28 == 3 - CAS is busy. page fault.
|
||||||
|
|
||||||
|
Scratch: r20, r1
|
||||||
|
|
||||||
|
****************************************************/
|
||||||
|
|
||||||
|
lws_atomic_xchg:
|
||||||
|
#ifdef CONFIG_64BIT
|
||||||
|
/* Wide mode user process? */
|
||||||
|
bb,<,n %sp, 31, atomic_xchg_begin
|
||||||
|
|
||||||
|
/* Clip the input registers for 32-bit processes. We don't
|
||||||
|
need to clip %r23 as we only use it for word operations */
|
||||||
|
depdi 0, 31, 32, %r26
|
||||||
|
depdi 0, 31, 32, %r25
|
||||||
|
depdi 0, 31, 32, %r24
|
||||||
|
depdi 0, 31, 32, %r23
|
||||||
|
#endif
|
||||||
|
|
||||||
|
atomic_xchg_begin:
|
||||||
|
/* Check the validity of the size pointer */
|
||||||
|
subi,>>= 3, %r25, %r0
|
||||||
|
b,n lws_exit_nosys
|
||||||
|
|
||||||
|
/* Jump to the functions which will load the old and new values into
|
||||||
|
registers depending on the their size */
|
||||||
|
shlw %r25, 2, %r1
|
||||||
|
blr %r1, %r0
|
||||||
|
nop
|
||||||
|
|
||||||
|
/* Perform exception checks */
|
||||||
|
|
||||||
|
/* 8-bit exchange */
|
||||||
|
1: ldb 0(%r24), %r20
|
||||||
|
copy %r23, %r20
|
||||||
|
depi_safe 0, 31, 2, %r20
|
||||||
|
b atomic_xchg_start
|
||||||
|
2: stbys,e %r0, 0(%r20)
|
||||||
|
nop
|
||||||
|
nop
|
||||||
|
nop
|
||||||
|
|
||||||
|
/* 16-bit exchange */
|
||||||
|
3: ldh 0(%r24), %r20
|
||||||
|
copy %r23, %r20
|
||||||
|
depi_safe 0, 31, 2, %r20
|
||||||
|
b atomic_xchg_start
|
||||||
|
4: stbys,e %r0, 0(%r20)
|
||||||
|
nop
|
||||||
|
nop
|
||||||
|
nop
|
||||||
|
|
||||||
|
/* 32-bit exchange */
|
||||||
|
5: ldw 0(%r24), %r20
|
||||||
|
b atomic_xchg_start
|
||||||
|
6: stbys,e %r0, 0(%r23)
|
||||||
|
nop
|
||||||
|
nop
|
||||||
|
nop
|
||||||
|
nop
|
||||||
|
nop
|
||||||
|
|
||||||
|
/* 64-bit exchange */
|
||||||
|
#ifdef CONFIG_64BIT
|
||||||
|
7: ldd 0(%r24), %r20
|
||||||
|
8: stdby,e %r0, 0(%r23)
|
||||||
|
#else
|
||||||
|
7: ldw 0(%r24), %r20
|
||||||
|
8: ldw 4(%r24), %r20
|
||||||
|
copy %r23, %r20
|
||||||
|
depi_safe 0, 31, 2, %r20
|
||||||
|
9: stbys,e %r0, 0(%r20)
|
||||||
|
10: stbys,e %r0, 4(%r20)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
atomic_xchg_start:
|
||||||
|
/* Trigger memory reference interruptions without writing to memory */
|
||||||
|
copy %r26, %r28
|
||||||
|
depi_safe 0, 31, 2, %r28
|
||||||
|
11: ldw 0(%r28), %r1
|
||||||
|
12: stbys,e %r0, 0(%r28)
|
||||||
|
|
||||||
|
/* Calculate 8-bit hash index from virtual address */
|
||||||
|
extru_safe %r26, 27, 8, %r20
|
||||||
|
|
||||||
|
/* Load start of lock table */
|
||||||
|
ldil L%lws_lock_start, %r28
|
||||||
|
ldo R%lws_lock_start(%r28), %r28
|
||||||
|
|
||||||
|
/* Find lock to use, the hash index is one of 0 to
|
||||||
|
255, multiplied by 16 (keep it 16-byte aligned)
|
||||||
|
and add to the lock table offset. */
|
||||||
|
shlw %r20, 4, %r20
|
||||||
|
add %r20, %r28, %r20
|
||||||
|
|
||||||
|
rsm PSW_SM_I, %r0 /* Disable interrupts */
|
||||||
|
|
||||||
|
/* Try to acquire the lock */
|
||||||
|
LDCW 0(%sr2,%r20), %r28
|
||||||
|
comclr,<> %r0, %r28, %r0
|
||||||
|
b,n lws_wouldblock
|
||||||
|
|
||||||
|
/* Disable page faults to prevent sleeping in critical region */
|
||||||
|
lws_pagefault_disable %r21,%r28
|
||||||
|
|
||||||
|
/* NOTES:
|
||||||
|
This all works because intr_do_signal
|
||||||
|
and schedule both check the return iasq
|
||||||
|
and see that we are on the kernel page
|
||||||
|
so this process is never scheduled off
|
||||||
|
or is ever sent any signal of any sort,
|
||||||
|
thus it is wholly atomic from userspace's
|
||||||
|
perspective
|
||||||
|
*/
|
||||||
|
|
||||||
|
/* Jump to the correct function */
|
||||||
|
blr %r1, %r0
|
||||||
|
/* Set %r28 as non-zero for now */
|
||||||
|
ldo 1(%r0),%r28
|
||||||
|
|
||||||
|
/* 8-bit exchange */
|
||||||
|
14: ldb 0(%r26), %r1
|
||||||
|
15: stb %r1, 0(%r23)
|
||||||
|
15: ldb 0(%r24), %r1
|
||||||
|
17: stb %r1, 0(%r26)
|
||||||
|
b lws_exit_noerror
|
||||||
|
copy %r0, %r28
|
||||||
|
nop
|
||||||
|
nop
|
||||||
|
|
||||||
|
/* 16-bit exchange */
|
||||||
|
18: ldh 0(%r26), %r1
|
||||||
|
19: sth %r1, 0(%r23)
|
||||||
|
20: ldh 0(%r24), %r1
|
||||||
|
21: sth %r1, 0(%r26)
|
||||||
|
b lws_exit_noerror
|
||||||
|
copy %r0, %r28
|
||||||
|
nop
|
||||||
|
nop
|
||||||
|
|
||||||
|
/* 32-bit exchange */
|
||||||
|
22: ldw 0(%r26), %r1
|
||||||
|
23: stw %r1, 0(%r23)
|
||||||
|
24: ldw 0(%r24), %r1
|
||||||
|
25: stw %r1, 0(%r26)
|
||||||
|
b lws_exit_noerror
|
||||||
|
copy %r0, %r28
|
||||||
|
nop
|
||||||
|
nop
|
||||||
|
|
||||||
|
/* 64-bit exchange */
|
||||||
|
#ifdef CONFIG_64BIT
|
||||||
|
26: ldd 0(%r26), %r1
|
||||||
|
27: std %r1, 0(%r23)
|
||||||
|
28: ldd 0(%r24), %r1
|
||||||
|
29: std %r1, 0(%r26)
|
||||||
|
#else
|
||||||
|
26: flddx 0(%r26), %fr4
|
||||||
|
27: fstdx %fr4, 0(%r23)
|
||||||
|
28: flddx 0(%r24), %fr4
|
||||||
|
29: fstdx %fr4, 0(%r26)
|
||||||
|
#endif
|
||||||
|
b lws_exit_noerror
|
||||||
|
copy %r0, %r28
|
||||||
|
|
||||||
|
/* A fault occurred on load or stbys,e store */
|
||||||
|
30: b,n lws_fault
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 30b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 30b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(3b-linux_gateway_page, 30b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 30b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 30b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 30b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 30b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 30b-linux_gateway_page)
|
||||||
|
#ifndef CONFIG_64BIT
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 30b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 30b-linux_gateway_page)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 30b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 30b-linux_gateway_page)
|
||||||
|
|
||||||
|
/* A page fault occurred in critical region */
|
||||||
|
31: b,n lws_pagefault
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 31b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 31b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 31b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 31b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 31b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 31b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 31b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(21b-linux_gateway_page, 31b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(22b-linux_gateway_page, 31b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(23b-linux_gateway_page, 31b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(24b-linux_gateway_page, 31b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(25b-linux_gateway_page, 31b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(26b-linux_gateway_page, 31b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(27b-linux_gateway_page, 31b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(28b-linux_gateway_page, 31b-linux_gateway_page)
|
||||||
|
ASM_EXCEPTIONTABLE_ENTRY(29b-linux_gateway_page, 31b-linux_gateway_page)
|
||||||
|
|
||||||
|
/***************************************************
|
||||||
|
LWS atomic store.
|
||||||
|
|
||||||
|
%r26 - Address to store
|
||||||
|
%r25 - Size of the variable (0/1/2/3 for 8/16/32/64 bit)
|
||||||
|
%r24 - Address of value to store
|
||||||
|
%r28 - Return non-zero on failure
|
||||||
|
%r21 - Kernel error code
|
||||||
|
|
||||||
|
%r21 returns the following error codes:
|
||||||
|
EAGAIN - CAS is busy, ldcw failed, try again.
|
||||||
|
EFAULT - Read or write failed.
|
||||||
|
|
||||||
|
If EAGAIN is returned, %r28 indicates the busy reason:
|
||||||
|
r28 == 1 - CAS is busy. lock contended.
|
||||||
|
r28 == 2 - CAS is busy. ldcw failed.
|
||||||
|
r28 == 3 - CAS is busy. page fault.
|
||||||
|
|
||||||
|
Scratch: r20, r1
|
||||||
|
|
||||||
|
****************************************************/
|
||||||
|
|
||||||
|
lws_atomic_store:
|
||||||
|
#ifdef CONFIG_64BIT
|
||||||
|
/* Wide mode user process? */
|
||||||
|
bb,<,n %sp, 31, atomic_store_begin
|
||||||
|
|
||||||
|
/* Clip the input registers for 32-bit processes. We don't
|
||||||
|
need to clip %r23 as we only use it for word operations */
|
||||||
|
depdi 0, 31, 32, %r26
|
||||||
|
depdi 0, 31, 32, %r25
|
||||||
|
depdi 0, 31, 32, %r24
|
||||||
|
#endif
|
||||||
|
|
||||||
|
atomic_store_begin:
|
||||||
|
/* Check the validity of the size pointer */
|
||||||
|
subi,>>= 3, %r25, %r0
|
||||||
|
b,n lws_exit_nosys
|
||||||
|
|
||||||
|
shlw %r25, 1, %r1
|
||||||
|
blr %r1, %r0
|
||||||
|
nop
|
||||||
|
|
||||||
|
/* Perform exception checks */
|
||||||
|
|
||||||
|
/* 8-bit store */
|
||||||
|
1: ldb 0(%r24), %r20
|
||||||
|
b,n atomic_store_start
|
||||||
|
nop
|
||||||
|
nop
|
||||||
|
|
||||||
|
/* 16-bit store */
|
||||||
|
2: ldh 0(%r24), %r20
|
||||||
|
b,n atomic_store_start
|
||||||
|
nop
|
||||||
|
nop
|
||||||
|
|
||||||
|
/* 32-bit store */
|
||||||
|
3: ldw 0(%r24), %r20
|
||||||
|
b,n atomic_store_start
|
||||||
|
nop
|
||||||
|
nop
|
||||||
|
|
||||||
|
/* 64-bit store */
|
||||||
|
#ifdef CONFIG_64BIT
|
||||||
|
4: ldd 0(%r24), %r20
|
||||||
|
#else
|
||||||
|
4: ldw 0(%r24), %r20
|
||||||
|
5: ldw 4(%r24), %r20
|
||||||
|
#endif
|
||||||
|
|
||||||
|
+atomic_store_start:
+	/* Trigger memory reference interruptions without writing to memory */
+	copy	%r26, %r28
+	depi_safe	0, 31, 2, %r28
+6:	ldw	0(%r28), %r1
+7:	stbys,e	%r0, 0(%r28)
+
+	/* Calculate 8-bit hash index from virtual address */
+	extru_safe	%r26, 27, 8, %r20
+
+	/* Load start of lock table */
+	ldil	L%lws_lock_start, %r28
+	ldo	R%lws_lock_start(%r28), %r28
+
+	/* Find lock to use, the hash index is one of 0 to
+	   255, multiplied by 16 (keep it 16-byte aligned)
+	   and add to the lock table offset. */
+	shlw	%r20, 4, %r20
+	add	%r20, %r28, %r20
+
+	rsm	PSW_SM_I, %r0			/* Disable interrupts */
+
+	/* Try to acquire the lock */
+	LDCW	0(%sr2,%r20), %r28
+	comclr,<>	%r0, %r28, %r0
+	b,n	lws_wouldblock
+
+	/* Disable page faults to prevent sleeping in critical region */
+	lws_pagefault_disable	%r21,%r28
+
+	/* NOTES:
+		This all works because intr_do_signal
+		and schedule both check the return iasq
+		and see that we are on the kernel page
+		so this process is never scheduled off
+		or is ever sent any signal of any sort,
+		thus it is wholly atomic from userspace's
+		perspective
+	*/
+
+	/* Jump to the correct function */
+	blr	%r1, %r0
+	/* Set %r28 as non-zero for now */
+	ldo	1(%r0),%r28
+
+	/* 8-bit store */
+9:	ldb	0(%r24), %r1
+10:	stb	%r1, 0(%r26)
+	b	lws_exit_noerror
+	copy	%r0, %r28
+
+	/* 16-bit store */
+11:	ldh	0(%r24), %r1
+12:	sth	%r1, 0(%r26)
+	b	lws_exit_noerror
+	copy	%r0, %r28
+
+	/* 32-bit store */
+13:	ldw	0(%r24), %r1
+14:	stw	%r1, 0(%r26)
+	b	lws_exit_noerror
+	copy	%r0, %r28
+
+	/* 64-bit store */
+#ifdef CONFIG_64BIT
+15:	ldd	0(%r24), %r1
+16:	std	%r1, 0(%r26)
+#else
+15:	flddx	0(%r24), %fr4
+16:	fstdx	%fr4, 0(%r26)
+#endif
+	b	lws_exit_noerror
+	copy	%r0, %r28
+
+	/* A fault occurred on load or stbys,e store */
+30:	b,n	lws_fault
+	ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 30b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 30b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(3b-linux_gateway_page, 30b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 30b-linux_gateway_page)
+#ifndef CONFIG_64BIT
+	ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 30b-linux_gateway_page)
+#endif
+
+	ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 30b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 30b-linux_gateway_page)
+
+	/* A page fault occurred in critical region */
+31:	b,n	lws_pagefault
+	ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 31b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 31b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 31b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 31b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 31b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 31b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 31b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 31b-linux_gateway_page)
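
Not part of the diff: the hash computed above with extru_safe/shlw selects one of 256 spinlocks laid out 16 bytes apart starting at lws_lock_start. A minimal C sketch of the same address arithmetic, with the function name and byte-pointer type chosen here for illustration only:

#include <stdint.h>

/* Pick the LWS spinlock guarding a user address: bits 4..11 of the
 * virtual address form an 8-bit hash (extru_safe %r26,27,8), scaled by
 * the 16-byte stride of the table (shlw %r20,4 ; add). */
static inline unsigned int *lws_lock_for(uint8_t *lws_lock_start, uintptr_t uaddr)
{
	unsigned int hash = (uaddr >> 4) & 0xff;

	return (unsigned int *)(lws_lock_start + hash * 16);
}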

	/* Make sure nothing else is placed on this page */
	.align PAGE_SIZE
END(linux_gateway_page)

@@ -899,7 +1291,9 @@ ENTRY(end_linux_gateway_page)
ENTRY(lws_table)
	LWS_ENTRY(compare_and_swap32)		/* 0 - ELF32 Atomic 32bit CAS */
	LWS_ENTRY(compare_and_swap64)		/* 1 - ELF64 Atomic 32bit CAS */
-	LWS_ENTRY(compare_and_swap_2)		/* 2 - ELF32 Atomic 64bit CAS */
+	LWS_ENTRY(compare_and_swap_2)		/* 2 - Atomic 64bit CAS */
+	LWS_ENTRY(atomic_xchg)			/* 3 - Atomic Exchange */
+	LWS_ENTRY(atomic_store)			/* 4 - Atomic Store */
END(lws_table)
	/* End of lws table */
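
Not part of the diff: the new table slots are reached through the LWS gateway the same way the existing CAS entry is. A hedged userspace sketch of a 32-bit lws_atomic_store call, following the register convention in the comment block above; the wrapper name, clobber list and retry policy are illustrative assumptions (modelled on how glibc drives the existing LWS CAS), not something this series ships:

#include <stdint.h>

/* Store val into *addr atomically via LWS index 4 (atomic_store).
 * %r26/%r25/%r24 carry the arguments, %r21 returns the kernel error
 * code and %r28 the busy reason, as documented above. */
static long lws_atomic_store_32(volatile uint32_t *addr, uint32_t val)
{
	register unsigned long r26 asm("r26") = (unsigned long)addr;	/* address to store to */
	register unsigned long r25 asm("r25") = 2;			/* size index 2 = 32 bit */
	register unsigned long r24 asm("r24") = (unsigned long)&val;	/* address of the value */
	register long          r21 asm("r21");				/* kernel error code */
	register unsigned long r28 asm("r28");				/* busy reason on EAGAIN */

	asm volatile(
		"ble	0xb0(%%sr2, %%r0)\n\t"	/* branch to the LWS gateway */
		"ldi	4, %%r20\n"		/* delay slot: LWS index 4 = atomic_store */
		: "=r" (r21), "=r" (r28)
		: "r" (r26), "r" (r25), "r" (r24)
		: "r1", "r20", "r22", "r23", "r29", "r31", "memory");

	return r21;	/* 0 on success; a caller would retry on -EAGAIN */
}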

@@ -9,8 +9,10 @@
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
+#include <asm/ldcw.h>

-unsigned int __aligned(16) toc_lock = 1;
+static unsigned int __aligned(16) toc_lock = 1;
+DEFINE_PER_CPU_PAGE_ALIGNED(char [16384], toc_stack);

static void toc20_to_pt_regs(struct pt_regs *regs, struct pdc_toc_pim_20 *toc)
{

@@ -63,7 +65,8 @@ void notrace __noreturn __cold toc_intr(struct pt_regs *regs)
	struct pdc_toc_pim_20 pim_data20;
	struct pdc_toc_pim_11 pim_data11;

-	nmi_enter();
+	/* verify we wrote regs to the correct stack */
+	BUG_ON(regs != (struct pt_regs *)&per_cpu(toc_stack, raw_smp_processor_id()));

	if (boot_cpu_data.cpu_type >= pcxu) {
		if (pdc_pim_toc20(&pim_data20))

@@ -76,14 +79,25 @@ void notrace __noreturn __cold toc_intr(struct pt_regs *regs)
	}

#ifdef CONFIG_KGDB
+	nmi_enter();
+
	if (atomic_read(&kgdb_active) != -1)
		kgdb_nmicallback(raw_smp_processor_id(), regs);
	kgdb_handle_exception(9, SIGTRAP, 0, regs);
#endif

+	/* serialize output, otherwise all CPUs write backtrace at once */
+	while (__ldcw(&toc_lock) == 0)
+		; /* wait */
	show_regs(regs);
+	toc_lock = 1;	/* release lock for next CPU */
+
+	if (raw_smp_processor_id() != 0)
+		while (1) ;	/* all but monarch CPU will wait endless. */

	/* give other CPUs time to show their backtrace */
	mdelay(2000);

	machine_restart("TOC");

	/* should never reach this */
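
Not part of the diff: toc_lock is declared __aligned(16) and initialised to 1 because of the ldcw semantics behind __ldcw() — the instruction atomically loads a 16-byte-aligned word and clears it to zero, so the word itself is the lock and a non-zero return means the caller won it. A minimal sketch, assuming kernel context and with example_lock/emit as illustrative names:

#include <linux/compiler.h>
#include <asm/ldcw.h>

static unsigned int __aligned(16) example_lock = 1;	/* 1 = free, as with toc_lock */

static void serialize_example(void (*emit)(void))
{
	while (__ldcw(&example_lock) == 0)
		;			/* ldcw returned 0: another CPU holds it, spin */
	emit();				/* e.g. show_regs() in toc_intr() */
	example_lock = 1;		/* store 1 back to release for the next CPU */
}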

@@ -5,34 +5,25 @@
	.level 1.1

#include <asm/assembly.h>
-#include <asm/psw.h>
#include <linux/threads.h>
#include <linux/linkage.h>

	.text
	.import toc_intr,code
-	.import toc_lock,data
+	.import toc_stack,data
	.align 16
ENTRY_CFI(toc_handler)
-	/*
-	 * synchronize CPUs and obtain offset
-	 * for stack setup.
-	 */
-	load32	PA(toc_lock),%r1
-0:	ldcw,co	0(%r1),%r2
-	cmpib,=	0,%r2,0b
-	nop
-	addi	1,%r2,%r4
-	stw	%r4,0(%r1)
-	addi	-1,%r2,%r4
-
	load32	PA(toc_stack),%sp
-	/*
-	 * deposit CPU number into stack address,
-	 * so every CPU will have its own stack.
-	 */
-	SHLREG	%r4,14,%r4
+
+#ifdef CONFIG_SMP
+	/* get per-cpu toc_stack address. */
+	mfctl	%cr30, %r1
+	tophys	%r1,%r2			/* task_struct */
+	LDREG	TASK_TI_CPU(%r2),%r4	/* cpu */
+	load32	PA(__per_cpu_offset),%r1
+	LDREGX	%r4(%r1),%r4
	add	%r4,%sp,%sp
+#endif

	/*
	 * setup pt_regs on stack and save the

@@ -82,7 +73,3 @@ ENDPROC_CFI(toc_handler)
	 */
SYM_DATA(toc_handler_csum, .long 0)
SYM_DATA(toc_handler_size, .long . - toc_handler)
-
-	__PAGE_ALIGNED_BSS
-	.align 64
-SYM_DATA(toc_stack, .block 16384*NR_CPUS)
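
Not part of the diff: the fixed 16384*NR_CPUS block removed here is replaced by the DEFINE_PER_CPU_PAGE_ALIGNED(char [16384], toc_stack) added in toc.c above, and the handler now derives each CPU's stack base from __per_cpu_offset. A rough C equivalent of what the assembly computes (the helper name is illustrative):

#include <linux/percpu.h>

DECLARE_PER_CPU_PAGE_ALIGNED(char [16384], toc_stack);	/* defined in toc.c */

/* Base of the TOC stack for a given CPU; the handler saves pt_regs here,
 * which is what the BUG_ON() in toc_intr() checks against. */
static void *toc_stack_base(int cpu)
{
	return &per_cpu(toc_stack, cpu);	/* toc_stack + __per_cpu_offset[cpu] */
}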

@@ -785,7 +785,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
	 * unless pagefault_disable() was called before.
	 */

-	if (fault_space == 0 && !faulthandler_disabled())
+	if (faulthandler_disabled() || fault_space == 0)
	{
		/* Clean up and return if in exception table. */
		if (fixup_exception(regs))

@@ -148,11 +148,11 @@ int fixup_exception(struct pt_regs *regs)
	 * Fix up get_user() and put_user().
	 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant
	 * bit in the relative address of the fixup routine to indicate
-	 * that %r8 should be loaded with -EFAULT to report a userspace
-	 * access error.
+	 * that gr[ASM_EXCEPTIONTABLE_REG] should be loaded with
+	 * -EFAULT to report a userspace access error.
	 */
	if (fix->fixup & 1) {
-		regs->gr[8] = -EFAULT;
+		regs->gr[ASM_EXCEPTIONTABLE_REG] = -EFAULT;

		/* zero target register for get_user() */
		if (parisc_acctyp(0, regs->iir) == VM_READ) {
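
Not part of the diff, and an assumption rather than a quote of the headers: the two macros referenced above work roughly as sketched below — the EFAULT variant adds 1 to the fixup address to set the flag bit tested by fix->fixup & 1, and ASM_EXCEPTIONTABLE_REG names the error register (%r29 after this series, %r8 before). Verify the exact definitions against arch/parisc/include/asm/ in the tree.

/* Simplified sketch of the arch/parisc definitions (assumption, see above). */
#define ASM_EXCEPTIONTABLE_REG	29	/* %r29 carries -EFAULT back to the caller */

#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT(fault_addr, except_addr)	\
	ASM_EXCEPTIONTABLE_ENTRY(fault_addr, (except_addr) + 1)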

@@ -266,14 +266,14 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
	unsigned long acc_type;
	vm_fault_t fault = 0;
	unsigned int flags;
+	char *msg;

-	if (faulthandler_disabled())
-		goto no_context;
-
	tsk = current;
	mm = tsk->mm;
-	if (!mm)
+	if (!mm) {
+		msg = "Page fault: no context";
		goto no_context;
+	}

	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))

@@ -409,6 +409,7 @@ bad_area:
		force_sig_fault(signo, si_code, (void __user *) address);
		return;
	}
+	msg = "Page fault: bad address";

no_context:

@@ -416,11 +417,13 @@ no_context:
		return;
	}

-	parisc_terminate("Bad Address (null pointer deref?)", regs, code, address);
+	parisc_terminate(msg, regs, code, address);

out_of_memory:
	mmap_read_unlock(mm);
-	if (!user_mode(regs))
+	if (!user_mode(regs)) {
+		msg = "Page fault: out of memory";
		goto no_context;
+	}
	pagefault_out_of_memory();
}

@@ -482,11 +482,12 @@ static struct attribute *paths_subsys_attrs[] = {
	&paths_attr_layer.attr,
	NULL,
};
+ATTRIBUTE_GROUPS(paths_subsys);

/* Specific kobject type for our PDC paths */
static struct kobj_type ktype_pdcspath = {
	.sysfs_ops = &pdcspath_attr_ops,
-	.default_attrs = paths_subsys_attrs,
+	.default_groups = paths_subsys_groups,
};

/* We hard define the 4 types of path we expect to find */
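
Not part of the diff: .default_groups works here because ATTRIBUTE_GROUPS(paths_subsys) roughly expands to the following (simplified from <linux/sysfs.h>), which is what supplies the paths_subsys_groups array:

static const struct attribute_group paths_subsys_group = {
	.attrs = paths_subsys_attrs,
};

static const struct attribute_group *paths_subsys_groups[] = {
	&paths_subsys_group,
	NULL,
};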

@@ -199,12 +199,16 @@ static inline bool __is_kernel_text(unsigned long addr)
 * @addr: address to check
 *
 * Returns: true if the address is located in the kernel range, false otherwise.
- * Note: an internal helper, only check the range of _stext to _end.
+ * Note: an internal helper, check the range of _stext to _end,
+ * and range from __init_begin to __init_end, which can be outside
+ * of the _stext to _end range.
 */
static inline bool __is_kernel(unsigned long addr)
{
-	return addr >= (unsigned long)_stext &&
-	       addr < (unsigned long)_end;
+	return ((addr >= (unsigned long)_stext &&
+	         addr < (unsigned long)_end) ||
+	        (addr >= (unsigned long)__init_begin &&
+	         addr < (unsigned long)__init_end));
}

#endif /* _ASM_GENERIC_SECTIONS_H_ */

@@ -33,4 +33,9 @@ if [ -n "${building_out_of_srctree}" ]; then
	do
		rm -f arch/mips/boot/compressed/${f}
	done
+
+	for f in firmware.c real2.S
+	do
+		rm -f arch/parisc/boot/compressed/${f}
+	done
fi