Mirror of https://github.com/torvalds/linux.git
ARM development for 5.16:
- Rejig task/thread info to place thread info in task struct
- Amba bus cleanups (removing unused functions)
- Handle Amba device probe without IRQ domains
- Parse linux,usable-memory-range in decompressor
- Mark OCRAM as read-only after initialisation
- Refactor page fault handling
- Fix PXN handling with LPAE kernels
- Warning and build fixes from Arnd

-----BEGIN PGP SIGNATURE-----
iQIzBAABCgAdFiEEuNNh8scc2k/wOAE+9OeQG+StrGQFAmGBEFIACgkQ9OeQG+St
rGRbLw/9EQPEVAnx4obKSfQ7+H6JF8EZnSUvmFe0tK2zyR5T8dCoifh5bmdkdu51
r2gtxse9gbCKzGyfAlhcR+p47P94ULt3/slRb+thc1E3USAkP9mDLH2gXlWraVOL
TdBn6WD2zHdGWmLYB3RPjh/FpQy2IEKajQU9pFC+Rp0Hf8OKg7KH1E5Ap8W9kjz6
o2HieVxsteuWKwCauQq95IDNZ/fpq/FuQi38fn11O52uB8PO4OC3LUR33/4qKBYj
iykzt6hxHHnDLWKMrR9hbv0J6hSjflVgqTEirTuk1EpkKcIVoc6EOPTGENus7U1r
GcrVbrnAs/obgYgT1DwTS0mreIAQ2dNpekbbICqD/SFrV4Rt/zOjImFXm17L4mxU
2D0FG9iyTFgQIYOQBrbaUbeeDpH+Dxn4ldFYWZ0/PLukz901KK40xV6b9gpe52iY
DJDmO8OVH55ZargQQXB13vcJ79ZYcHusEr+kBkU+kXXP7LzTBTZfj2a/xCR0H2hw
urS5ocp8WXHQ+jSZGVLR82kIVK0TuqDmkuOSi+VXuCIAMC3ITaSs5X1/foVn7r2b
SdBSoqa2R28HSWudtkP7Ki1QCqxzrrn4RWjCOyEa+aoHyKT/MXQ7E7qwrg8nWBXV
Ep83wvL2TmE5ZaljfEcyXnjYnXTcStB+YAD4G/6dZERLXbJ1tms=
=lDRU
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:

- Rejig task/thread info to place thread info in task struct
- Amba bus cleanups (removing unused functions)
- Handle Amba device probe without IRQ domains
- Parse linux,usable-memory-range in decompressor
- Mark OCRAM as read-only after initialisation
- Refactor page fault handling
- Fix PXN handling with LPAE kernels
- Warning and build fixes from Arnd

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm: (32 commits)
  ARM: 9151/1: Thumb2: avoid __builtin_thread_pointer() on Clang
  ARM: 9150/1: Fix PID_IN_CONTEXTIDR regression when THREAD_INFO_IN_TASK=y
  ARM: 9147/1: add printf format attribute to early_print()
  ARM: 9146/1: RiscPC needs older gcc version
  ARM: 9145/1: patch: fix BE32 compilation
  ARM: 9144/1: forbid ftrace with clang and thumb2_kernel
  ARM: 9143/1: add CONFIG_PHYS_OFFSET default values
  ARM: 9142/1: kasan: work around LPAE build warning
  ARM: 9140/1: allow compile-testing without machine record
  ARM: 9137/1: disallow CONFIG_THUMB with ARMv4
  ARM: 9136/1: ARMv7-M uses BE-8, not BE-32
  ARM: 9135/1: kprobes: address gcc -Wempty-body warning
  ARM: 9101/1: sa1100/assabet: convert LEDs to gpiod APIs
  ARM: 9131/1: mm: Fix PXN process with LPAE feature
  ARM: 9130/1: mm: Provide die_kernel_fault() helper
  ARM: 9126/1: mm: Kill page table base print in show_pte()
  ARM: 9127/1: mm: Cleanup access_error()
  ARM: 9129/1: mm: Kill task_struct argument for __do_page_fault()
  ARM: 9128/1: mm: Refactor the __do_page_fault()
  ARM: imx6: mark OCRAM mapping read-only
  ...
commit ab2e7f4b46
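To make the first bullet above concrete, here is a minimal sketch in plain C (illustrative struct and helper names only, not the kernel definitions): with THREAD_INFO_IN_TASK, thread_info sits as the first member of task_struct, so a pointer to the task and a pointer to its thread_info are the same address, and a single per-CPU register slot (TPIDRURO on ARM) can serve both lookups.

#include <assert.h>

/* Illustrative stand-ins for the kernel structures. */
struct thread_info { unsigned long flags; int cpu; };
struct task_struct {
	struct thread_info thread_info;  /* must stay the first member */
	int pid;
};

/* With thread_info embedded first, the conversions are just casts/offsets. */
static struct thread_info *task_thread_info(struct task_struct *t)
{
	return &t->thread_info;
}

static struct task_struct *thread_task(struct thread_info *ti)
{
	return (struct task_struct *)ti;
}

int main(void)
{
	struct task_struct task = { .thread_info = { .cpu = 1 }, .pid = 42 };

	/* Same address, two views of it. */
	assert((void *)&task == (void *)task_thread_info(&task));
	assert(thread_task(task_thread_info(&task))->pid == 42);
	return 0;
}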
@@ -91,7 +91,7 @@ config ARM
select HAVE_FAST_GUP if ARM_LPAE
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG
select HAVE_FUNCTION_TRACER if !XIP_KERNEL
select HAVE_FUNCTION_TRACER if !XIP_KERNEL && !(THUMB2_KERNEL && CC_IS_CLANG)
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_GCC_PLUGINS
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
@@ -126,6 +126,7 @@ config ARM
select PERF_USE_VMALLOC
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
select THREAD_INFO_IN_TASK if CURRENT_POINTER_IN_TPIDRURO
select TRACE_IRQFLAGS_SUPPORT if !CPU_V7M
# Above selects are sorted alphabetically; please add new ones
# according to that. Thanks.
@@ -265,10 +266,12 @@ config PHYS_OFFSET
hex "Physical address of main memory" if MMU
depends on !ARM_PATCH_PHYS_VIRT
default DRAM_BASE if !MMU
default 0x00000000 if ARCH_FOOTBRIDGE
default 0x00000000 if ARCH_FOOTBRIDGE || ARCH_IXP4XX
default 0x10000000 if ARCH_OMAP1 || ARCH_RPC
default 0x20000000 if ARCH_S5PV210
default 0xc0000000 if ARCH_SA1100
default 0x30000000 if ARCH_S3C24XX
default 0xa0000000 if ARCH_IOP32X || ARCH_PXA
default 0xc0000000 if ARCH_EP93XX || ARCH_SA1100
default 0
help
Please provide the physical address corresponding to the
location of main memory in your system.
@@ -433,6 +436,7 @@ config ARCH_PXA
config ARCH_RPC
bool "RiscPC"
depends on MMU
depends on !CC_IS_CLANG && GCC_VERSION < 90100 && GCC_VERSION >= 60000
select ARCH_ACORN
select ARCH_MAY_HAVE_PC_FDC
select ARCH_SPARSEMEM_ENABLE
@@ -1158,6 +1162,11 @@ config SMP_ON_UP

If you don't know what to do here, say Y.

config CURRENT_POINTER_IN_TPIDRURO
def_bool y
depends on SMP && CPU_32v6K && !CPU_V6

config ARM_CPU_TOPOLOGY
bool "Support cpu topology definition"
depends on SMP && CPU_V7
@@ -1601,7 +1610,7 @@ config XEN

config STACKPROTECTOR_PER_TASK
bool "Use a unique stack canary value for each task"
depends on GCC_PLUGINS && STACKPROTECTOR && SMP && !XIP_DEFLATED_DATA
depends on GCC_PLUGINS && STACKPROTECTOR && THREAD_INFO_IN_TASK && !XIP_DEFLATED_DATA
select GCC_PLUGIN_ARM_SSP_PER_TASK
default y
help
@@ -113,6 +113,10 @@ ifeq ($(CONFIG_CC_IS_CLANG),y)
CFLAGS_ABI += -meabi gnu
endif

ifeq ($(CONFIG_CURRENT_POINTER_IN_TPIDRURO),y)
CFLAGS_ABI += -mtp=cp15
endif

# Accept old syntax despite ".syntax unified"
AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
@@ -273,11 +277,8 @@ ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
prepare: stack_protector_prepare
stack_protector_prepare: prepare0
$(eval SSP_PLUGIN_CFLAGS := \
-fplugin-arg-arm_ssp_per_task_plugin-tso=$(shell \
awk '{if ($$2 == "THREAD_SZ_ORDER") print $$3;}'\
include/generated/asm-offsets.h) \
-fplugin-arg-arm_ssp_per_task_plugin-offset=$(shell \
awk '{if ($$2 == "TI_STACK_CANARY") print $$3;}'\
awk '{if ($$2 == "TSK_STACK_CANARY") print $$3;}'\
include/generated/asm-offsets.h))
$(eval KBUILD_CFLAGS += $(SSP_PLUGIN_CFLAGS))
$(eval GCC_PLUGINS_CFLAGS += $(SSP_PLUGIN_CFLAGS))
@@ -55,16 +55,17 @@ static uint64_t get_val(const fdt32_t *cells, uint32_t ncells)
* DTB, and, if out-of-range, replace it by the real start address.
* To preserve backwards compatibility (systems reserving a block of memory
* at the start of physical memory, kdump, ...), the traditional method is
* always used if it yields a valid address.
* used if it yields a valid address, unless the "linux,usable-memory-range"
* property is present.
*
* Return value: start address of physical memory to use
*/
uint32_t fdt_check_mem_start(uint32_t mem_start, const void *fdt)
{
uint32_t addr_cells, size_cells, base;
uint32_t addr_cells, size_cells, usable_base, base;
uint32_t fdt_mem_start = 0xffffffff;
const fdt32_t *reg, *endp;
uint64_t size, end;
const fdt32_t *usable, *reg, *endp;
uint64_t size, usable_end, end;
const char *type;
int offset, len;
@@ -80,6 +81,27 @@ uint32_t fdt_check_mem_start(uint32_t mem_start, const void *fdt)
if (addr_cells > 2 || size_cells > 2)
return mem_start;

/*
* Usable memory in case of a crash dump kernel
* This property describes a limitation: memory within this range is
* only valid when also described through another mechanism
*/
usable = get_prop(fdt, "/chosen", "linux,usable-memory-range",
(addr_cells + size_cells) * sizeof(fdt32_t));
if (usable) {
size = get_val(usable + addr_cells, size_cells);
if (!size)
return mem_start;

if (addr_cells > 1 && fdt32_ld(usable)) {
/* Outside 32-bit address space */
return mem_start;
}

usable_base = fdt32_ld(usable + addr_cells - 1);
usable_end = usable_base + size;
}

/* Walk all memory nodes and regions */
for (offset = fdt_next_node(fdt, -1, NULL); offset >= 0;
offset = fdt_next_node(fdt, offset, NULL)) {
@@ -107,7 +129,20 @@ uint32_t fdt_check_mem_start(uint32_t mem_start, const void *fdt)

base = fdt32_ld(reg + addr_cells - 1);
end = base + size;
if (mem_start >= base && mem_start < end) {
if (usable) {
/*
* Clip to usable range, which takes precedence
* over mem_start
*/
if (base < usable_base)
base = usable_base;

if (end > usable_end)
end = usable_end;

if (end <= base)
continue;
} else if (mem_start >= base && mem_start < end) {
/* Calculated address is valid, use it */
return mem_start;
}
@@ -123,7 +158,8 @@ uint32_t fdt_check_mem_start(uint32_t mem_start, const void *fdt)
}

/*
* The calculated address is not usable.
* The calculated address is not usable, or was overridden by the
* "linux,usable-memory-range" property.
* Use the lowest usable physical memory address from the DTB instead,
* and make sure this is a multiple of 2 MiB for phys/virt patching.
*/
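The clipping step added above is self-contained enough to demonstrate on its own. A minimal sketch in plain C (illustrative function name, not the in-tree decompressor code): clip a memory region [base, end) against the "usable" window taken from /chosen/linux,usable-memory-range, and treat an empty result as "skip this region".

#include <stdint.h>
#include <stdio.h>

/* Returns 0 if nothing of the region remains after clipping. */
static int clip_region(uint64_t *base, uint64_t *end,
		       uint64_t usable_base, uint64_t usable_end)
{
	if (*base < usable_base)
		*base = usable_base;
	if (*end > usable_end)
		*end = usable_end;
	return *end > *base;
}

int main(void)
{
	uint64_t base = 0x40000000, end = 0xc0000000;

	/* Clip a 2 GiB bank to a 128 MiB crash-kernel window. */
	if (clip_region(&base, &end, 0x48000000, 0x50000000))
		printf("usable region: [%#llx, %#llx)\n",
		       (unsigned long long)base, (unsigned long long)end);
	return 0;
}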
@@ -240,9 +240,6 @@ static int scoop_remove(struct platform_device *pdev)
{
struct scoop_dev *sdev = platform_get_drvdata(pdev);

if (!sdev)
return -EINVAL;

if (sdev->gpio.base != -1)
gpiochip_remove(&sdev->gpio);
@@ -199,14 +199,43 @@
.endm
.endr

.macro get_current, rd
#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
mrc p15, 0, \rd, c13, c0, 3 @ get TPIDRURO register
#else
get_thread_info \rd
ldr \rd, [\rd, #TI_TASK]
#endif
.endm

.macro set_current, rn
#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
mcr p15, 0, \rn, c13, c0, 3 @ set TPIDRURO register
#endif
.endm

.macro reload_current, t1:req, t2:req
#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
adr_l \t1, __entry_task @ get __entry_task base address
mrc p15, 0, \t2, c13, c0, 4 @ get per-CPU offset
ldr \t1, [\t1, \t2] @ load variable
mcr p15, 0, \t1, c13, c0, 3 @ store in TPIDRURO
#endif
.endm

/*
* Get current thread_info.
*/
.macro get_thread_info, rd
#ifdef CONFIG_THREAD_INFO_IN_TASK
/* thread_info is the first member of struct task_struct */
get_current \rd
#else
ARM( mov \rd, sp, lsr #THREAD_SIZE_ORDER + PAGE_SHIFT )
THUMB( mov \rd, sp )
THUMB( lsr \rd, \rd, #THREAD_SIZE_ORDER + PAGE_SHIFT )
mov \rd, \rd, lsl #THREAD_SIZE_ORDER + PAGE_SHIFT
#endif
.endm

/*
arch/arm/include/asm/current.h (new file, 55 lines)
@@ -0,0 +1,55 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021 Keith Packard <keithp@keithp.com>
* Copyright (c) 2021 Google, LLC <ardb@kernel.org>
*/

#ifndef _ASM_ARM_CURRENT_H
#define _ASM_ARM_CURRENT_H

#ifndef __ASSEMBLY__

struct task_struct;

static inline void set_current(struct task_struct *cur)
{
if (!IS_ENABLED(CONFIG_CURRENT_POINTER_IN_TPIDRURO))
return;

/* Set TPIDRURO */
asm("mcr p15, 0, %0, c13, c0, 3" :: "r"(cur) : "memory");
}

#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO

static inline struct task_struct *get_current(void)
{
struct task_struct *cur;

#if __has_builtin(__builtin_thread_pointer) && \
!(defined(CONFIG_THUMB2_KERNEL) && \
defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 130001)
/*
* Use the __builtin helper when available - this results in better
* code, especially when using GCC in combination with the per-task
* stack protector, as the compiler will recognize that it needs to
* load the TLS register only once in every function.
*
* Clang < 13.0.1 gets this wrong for Thumb2 builds:
* https://github.com/ClangBuiltLinux/linux/issues/1485
*/
cur = __builtin_thread_pointer();
#else
asm("mrc p15, 0, %0, c13, c0, 3" : "=r"(cur));
#endif
return cur;
}

#define current get_current()
#else
#include <asm-generic/current.h>
#endif /* CONFIG_CURRENT_POINTER_IN_TPIDRURO */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_ARM_CURRENT_H */
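A user-space analogue of this header may help readers unfamiliar with TPIDRURO. In the sketch below (plain C11, illustrative names only, not kernel code), a thread-local variable plays the role of the per-CPU register: set_current() stores the running task's pointer, get_current() reads it back, and a current macro hides the accessor, mirroring the shape of the file above.

#include <stdio.h>

/* Illustrative stand-in for struct task_struct. */
struct task { int id; };

/* Thread-local slot standing in for the TPIDRURO register. */
static _Thread_local struct task *current_task;

static void set_current(struct task *t) { current_task = t; }
static struct task *get_current(void)   { return current_task; }
#define current get_current()

int main(void)
{
	struct task init_task = { .id = 0 };

	set_current(&init_task);
	printf("current task id: %d\n", current->id);
	return 0;
}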
@@ -138,6 +138,7 @@ extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
void *);
extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
void __arm_iomem_set_ro(void __iomem *ptr, size_t size);
extern void __iounmap(volatile void __iomem *addr);

extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
@@ -110,12 +110,17 @@ extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr);
#define __opcode_to_mem_thumb16(x) ___opcode_identity16(x)
#define ___asm_opcode_to_mem_arm(x) ___asm_opcode_identity32(x)
#define ___asm_opcode_to_mem_thumb16(x) ___asm_opcode_identity16(x)
#ifndef CONFIG_CPU_ENDIAN_BE32
#ifdef CONFIG_CPU_ENDIAN_BE32
#ifndef __ASSEMBLY__
/*
* On BE32 systems, using 32-bit accesses to store Thumb instructions will not
* work in all cases, due to alignment constraints. For now, a correct
* version is not provided for BE32.
* version is not provided for BE32, but the prototype needs to be there
* to compile patch.c.
*/
extern __u32 __opcode_to_mem_thumb32(__u32);
#endif
#else
#define __opcode_to_mem_thumb32(x) ___opcode_swahw32(x)
#define ___asm_opcode_to_mem_thumb32(x) ___asm_opcode_swahw32(x)
#endif
@@ -19,7 +19,7 @@
static const struct tagtable __tagtable_##fn __tag = { tag, fn }

extern int arm_add_memory(u64 start, u64 size);
extern void early_print(const char *str, ...);
extern __printf(1, 2) void early_print(const char *str, ...);
extern void dump_machine_table(void);

#ifdef CONFIG_ATAGS_PROC
@@ -48,7 +48,7 @@ extern void set_smp_ipi_range(int ipi_base, int nr_ipi);
* Called from platform specific assembly code, this is the
* secondary CPU entry point.
*/
asmlinkage void secondary_start_kernel(void);
asmlinkage void secondary_start_kernel(struct task_struct *task);

/*
@@ -61,6 +61,7 @@ struct secondary_data {
};
unsigned long swapper_pg_dir;
void *stack;
struct task_struct *task;
};
extern struct secondary_data secondary_data;
extern void secondary_startup(void);
@@ -39,8 +39,6 @@ static __always_inline void boot_init_stack_canary(void)
current->stack_canary = canary;
#ifndef CONFIG_STACKPROTECTOR_PER_TASK
__stack_chk_guard = current->stack_canary;
#else
current_thread_info()->stack_canary = current->stack_canary;
#endif
}
@@ -23,9 +23,25 @@
*/
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

static inline void set_ti_cpu(struct task_struct *p)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
* The core code no longer maintains the thread_info::cpu field once
* CONFIG_THREAD_INFO_IN_TASK is in effect, but we rely on it for
* raw_smp_processor_id(), which cannot access struct task_struct*
* directly for reasons of circular #inclusion hell.
*/
task_thread_info(p)->cpu = task_cpu(p);
#endif
}

#define switch_to(prev,next,last) \
do { \
__complete_pending_tlbi(); \
set_ti_cpu(next); \
if (IS_ENABLED(CONFIG_CURRENT_POINTER_IN_TPIDRURO)) \
__this_cpu_write(__entry_task, next); \
last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)
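The set_ti_cpu() comment above is the whole story: a field that the core no longer keeps up to date is mirrored at every context switch so that code which can only see thread_info still gets a correct CPU number. A minimal sketch of that pattern in plain C (illustrative types, not the kernel ones):

#include <assert.h>

struct thread_info { int cpu; };
struct task { struct thread_info ti; int assigned_cpu; };

/* Mirror the task's CPU into thread_info::cpu, as switch_to() now does. */
static void set_ti_cpu(struct task *p)
{
	p->ti.cpu = p->assigned_cpu;
}

/* What a thread_info-only reader (raw_smp_processor_id on ARM) sees. */
static int cpu_from_thread_info(const struct thread_info *ti)
{
	return ti->cpu;
}

int main(void)
{
	struct task next = { .ti = { .cpu = -1 }, .assigned_cpu = 2 };

	set_ti_cpu(&next);                       /* done at context switch */
	assert(cpu_from_thread_info(&next.ti) == 2);
	return 0;
}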
@@ -29,6 +29,8 @@

struct task_struct;

DECLARE_PER_CPU(struct task_struct *, __entry_task);

#include <asm/types.h>

struct cpu_context_save {
@@ -52,12 +54,11 @@ struct cpu_context_save {
struct thread_info {
unsigned long flags; /* low level flags */
int preempt_count; /* 0 => preemptable, <0 => bug */
#ifndef CONFIG_THREAD_INFO_IN_TASK
struct task_struct *task; /* main task structure */
#endif
__u32 cpu; /* cpu */
__u32 cpu_domain; /* cpu domain */
#ifdef CONFIG_STACKPROTECTOR_PER_TASK
unsigned long stack_canary;
#endif
struct cpu_context_save cpu_context; /* cpu context */
__u32 abi_syscall; /* ABI type and syscall nr */
__u8 used_cp[16]; /* thread used copro */
@@ -71,11 +72,27 @@ struct thread_info {

#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
INIT_THREAD_INFO_TASK(tsk) \
.flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
}

#ifdef CONFIG_THREAD_INFO_IN_TASK
#define INIT_THREAD_INFO_TASK(tsk)

static inline struct task_struct *thread_task(struct thread_info* ti)
{
return (struct task_struct *)ti;
}

#else
#define INIT_THREAD_INFO_TASK(tsk) .task = &(tsk),

static inline struct task_struct *thread_task(struct thread_info* ti)
{
return ti->task;
}

/*
* how to get the thread information struct from C
*/
@@ -86,6 +103,7 @@ static inline struct thread_info *current_thread_info(void)
return (struct thread_info *)
(current_stack_pointer & ~(THREAD_SIZE - 1));
}
#endif

#define thread_saved_pc(tsk) \
((unsigned long)(task_thread_info(tsk)->cpu_context.pc))
@@ -12,8 +12,8 @@

.macro switch_tls_v6k, base, tp, tpuser, tmp1, tmp2
mrc p15, 0, \tmp2, c13, c0, 2 @ get the user r/w register
mcr p15, 0, \tp, c13, c0, 3 @ set TLS register
mcr p15, 0, \tpuser, c13, c0, 2 @ and the user r/w register
@ TLS register update is deferred until return to user space
mcr p15, 0, \tpuser, c13, c0, 2 @ set the user r/w register
str \tmp2, [\base, #TI_TP_VALUE + 4] @ save it
.endm
@@ -38,18 +38,22 @@
#ifdef CONFIG_TLS_REG_EMUL
#define tls_emu 1
#define has_tls_reg 1
#define defer_tls_reg_update 0
#define switch_tls switch_tls_none
#elif defined(CONFIG_CPU_V6)
#define tls_emu 0
#define has_tls_reg (elf_hwcap & HWCAP_TLS)
#define defer_tls_reg_update 0
#define switch_tls switch_tls_v6
#elif defined(CONFIG_CPU_32v6K)
#define tls_emu 0
#define has_tls_reg 1
#define defer_tls_reg_update 1
#define switch_tls switch_tls_v6k
#else
#define tls_emu 0
#define has_tls_reg 0
#define defer_tls_reg_update 0
#define switch_tls switch_tls_software
#endif
@@ -77,7 +81,7 @@ static inline void set_tls(unsigned long val)
*/
barrier();

if (!tls_emu) {
if (!tls_emu && !defer_tls_reg_update) {
if (has_tls_reg) {
asm("mcr p15, 0, %0, c13, c0, 3"
: : "r" (val));
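The new defer_tls_reg_update flag implements a simple lazy-write pattern: set_tls() only records the value, and the hardware register is (re)loaded on the way back to user space, which frees the register for the kernel's own use (here, holding 'current') in between. A minimal sketch of the idea in plain C, with illustrative names rather than the in-tree ones:

#include <assert.h>

static unsigned long saved_tp_value;   /* analogue of the saved TP value */
static unsigned long hw_tls_register;  /* analogue of the TPIDRURO register */

static void set_tls_deferred(unsigned long val)
{
	saved_tp_value = val;              /* no register write here */
}

static void return_to_user(void)
{
	hw_tls_register = saved_tp_value;  /* restore_user_regs analogue */
}

int main(void)
{
	set_tls_deferred(0x1234);
	/* ... kernel may borrow the register for 'current' here ... */
	return_to_user();
	assert(hw_tls_register == 0x1234);
	return 0;
}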
@@ -43,7 +43,9 @@ int main(void)
BLANK();
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
#ifndef CONFIG_THREAD_INFO_IN_TASK
DEFINE(TI_TASK, offsetof(struct thread_info, task));
#endif
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_CPU_DOMAIN, offsetof(struct thread_info, cpu_domain));
DEFINE(TI_CPU_SAVE, offsetof(struct thread_info, cpu_context));
@@ -63,10 +65,6 @@ int main(void)
#ifdef CONFIG_IWMMXT
DEFINE(TI_IWMMXT_STATE, offsetof(struct thread_info, fpstate.iwmmxt));
#endif
#ifdef CONFIG_STACKPROTECTOR_PER_TASK
DEFINE(TI_STACK_CANARY, offsetof(struct thread_info, stack_canary));
#endif
DEFINE(THREAD_SZ_ORDER, THREAD_SIZE_ORDER);
BLANK();
DEFINE(S_R0, offsetof(struct pt_regs, ARM_r0));
DEFINE(S_R1, offsetof(struct pt_regs, ARM_r1));
@@ -381,6 +381,8 @@ ENDPROC(__fiq_abt)
ATRAP( teq r8, r7)
ATRAP( mcrne p15, 0, r8, c1, c0, 0)

reload_current r7, r8

@
@ Clear FP to mark the first stack frame
@
@@ -759,6 +761,8 @@ ENTRY(__switch_to)
add r7, r7, #TSK_STACK_CANARY & ~IMM12_MASK
.endif
ldr r7, [r7, #TSK_STACK_CANARY & IMM12_MASK]
#elif defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO)
mov r7, r2 @ Preserve 'next'
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
mcr p15, 0, r6, c3, c0, 0 @ Set domain register
@@ -773,6 +777,7 @@ ENTRY(__switch_to)
#endif
THUMB( mov ip, r4 )
mov r0, r5
set_current r7
ARM( ldmia r4, {r4 - sl, fp, sp, pc} ) @ Load all regs saved previously
THUMB( ldmia ip!, {r4 - sl, fp} ) @ Load all regs saved previously
THUMB( ldr sp, [ip], #4 )
@@ -170,6 +170,7 @@ ENTRY(vector_swi)
str saved_psr, [sp, #S_PSR] @ Save CPSR
str r0, [sp, #S_OLD_R0] @ Save OLD_R0
#endif
reload_current r10, ip
zero_fp
alignment_trap r10, ip, __cr_alignment
asm_trace_hardirqs_on save=0
@@ -292,6 +292,14 @@

.macro restore_user_regs, fast = 0, offset = 0
#if defined(CONFIG_CPU_32v6K) && !defined(CONFIG_CPU_V6)
@ The TLS register update is deferred until return to user space so we
@ can use it for other things while running in the kernel
get_thread_info r1
ldr r1, [r1, #TI_TP_VALUE]
mcr p15, 0, r1, c13, c0, 3 @ set TLS register
#endif

uaccess_enable r1, isb=0
#ifndef CONFIG_THUMB2_KERNEL
@ ARM mode restore
@@ -105,6 +105,11 @@ __mmap_switched:
mov r1, #0
bl __memset @ clear .bss

#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
adr_l r0, init_task @ get swapper task_struct
set_current r0
#endif

ldmia r4, {r0, r1, r2, r3}
str r9, [r0] @ Save processor ID
str r7, [r1] @ Save machine type
@@ -115,6 +115,7 @@ ENTRY(secondary_startup)
ret r12
1: bl __after_proc_init
ldr sp, [r7, #12] @ set up the stack pointer
ldr r0, [r7, #16] @ set up task pointer
mov fp, #0
b secondary_start_kernel
ENDPROC(secondary_startup)
@@ -424,8 +424,9 @@ ENDPROC(secondary_startup)
ENDPROC(secondary_startup_arm)

ENTRY(__secondary_switched)
ldr_l r7, secondary_data + 12 @ get secondary_data.stack
mov sp, r7
adr_l r7, secondary_data + 12 @ get secondary_data.stack
ldr sp, [r7]
ldr r0, [r7, #4] @ get secondary_data.task
mov fp, #0
b secondary_start_kernel
ENDPROC(__secondary_switched)
@@ -36,6 +36,10 @@

#include "signal.h"

#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
DEFINE_PER_CPU(struct task_struct *, __entry_task);
#endif

#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
@@ -269,10 +273,6 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,

thread_notify(THREAD_NOTIFY_COPY, thread);

#ifdef CONFIG_STACKPROTECTOR_PER_TASK
thread->stack_canary = p->stack_canary;
#endif

return 0;
}
@@ -153,6 +153,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
secondary_data.pgdir = virt_to_phys(idmap_pgd);
secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
secondary_data.task = idle;
if (IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK))
task_thread_info(idle)->cpu = cpu;

sync_cache_w(&secondary_data);

/*
@@ -375,9 +379,12 @@ void arch_cpu_idle_dead(void)
*/
__asm__("mov sp, %0\n"
" mov fp, #0\n"
" mov r0, %1\n"
" b secondary_start_kernel"
:
: "r" (task_stack_page(current) + THREAD_SIZE - 8));
: "r" (task_stack_page(current) + THREAD_SIZE - 8),
"r" (current)
: "r0");
}
#endif /* CONFIG_HOTPLUG_CPU */
@@ -400,11 +407,13 @@ static void smp_store_cpu_info(unsigned int cpuid)
* This is the secondary CPU boot entry. We're using this CPUs
* idle thread stack, but a set of temporary page tables.
*/
asmlinkage void secondary_start_kernel(void)
asmlinkage void secondary_start_kernel(struct task_struct *task)
{
struct mm_struct *mm = &init_mm;
unsigned int cpu;

set_current(task);

secondary_biglittle_init();

/*
@@ -166,7 +166,9 @@ SECTIONS
* binutils is too old (for other reasons as well)
*/
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
#ifndef CONFIG_COMPILE_TEST
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
#endif

#ifdef CONFIG_XIP_DEFLATED_DATA
/*
@@ -174,6 +174,8 @@ __start_rodata_section_aligned = ALIGN(__start_rodata, 1 << SECTION_SHIFT);
* binutils is too old (for other reasons as well)
*/
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
#ifndef CONFIG_COMPILE_TEST
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
#endif

#endif /* CONFIG_XIP_KERNEL */
@@ -572,6 +572,8 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata)
&imx6_suspend,
MX6Q_SUSPEND_OCRAM_SIZE - sizeof(*pm_info));

__arm_iomem_set_ro(suspend_ocram_base, MX6Q_SUSPEND_OCRAM_SIZE);

goto put_device;

pl310_cache_map_failed:
@@ -84,7 +84,7 @@ void ASSABET_BCR_frob(unsigned int mask, unsigned int val)
}
EXPORT_SYMBOL(ASSABET_BCR_frob);

static int __init assabet_init_gpio(void __iomem *reg, u32 def_val)
static void __init assabet_init_gpio(void __iomem *reg, u32 def_val)
{
struct gpio_chip *gc;

@@ -94,11 +94,9 @@ static int __init assabet_init_gpio(void __iomem *reg, u32 def_val)
assabet_names, NULL, NULL);

if (IS_ERR(gc))
return PTR_ERR(gc);
return;

assabet_bcr_gc = gc;

return gc->base;
}

/*
@@ -475,16 +473,23 @@ static struct gpiod_lookup_table assabet_cf_vcc_gpio_table = {
},
};

static struct gpiod_lookup_table assabet_leds_gpio_table = {
.dev_id = "leds-gpio",
.table = {
GPIO_LOOKUP("assabet", 13, NULL, GPIO_ACTIVE_LOW),
GPIO_LOOKUP("assabet", 14, NULL, GPIO_ACTIVE_LOW),
{ },
},
};

static struct gpio_led assabet_leds[] __initdata = {
{
.name = "assabet:red",
.default_trigger = "cpu0",
.active_low = 1,
.default_state = LEDS_GPIO_DEFSTATE_KEEP,
}, {
.name = "assabet:green",
.default_trigger = "heartbeat",
.active_low = 1,
.default_state = LEDS_GPIO_DEFSTATE_KEEP,
},
};
@@ -603,6 +608,7 @@ static void __init assabet_init(void)
&assabet_keys_pdata,
sizeof(assabet_keys_pdata));

gpiod_add_lookup_table(&assabet_leds_gpio_table);
gpio_led_register_device(-1, &assabet_leds_pdata);

#ifndef ASSABET_PAL_VIDEO
@@ -739,7 +745,6 @@ static void __init assabet_map_io(void)

void __init assabet_init_irq(void)
{
unsigned int assabet_gpio_base;
u32 def_val;

sa1100_init_irq();
@@ -754,10 +759,7 @@ void __init assabet_init_irq(void)
*
* This must precede any driver calls to BCR_set() or BCR_clear().
*/
assabet_gpio_base = assabet_init_gpio((void *)&ASSABET_BCR, def_val);

assabet_leds[0].gpio = assabet_gpio_base + 13;
assabet_leds[1].gpio = assabet_gpio_base + 14;
assabet_init_gpio((void *)&ASSABET_BCR, def_val);
}

MACHINE_START(ASSABET, "Intel-Assabet")
@@ -675,7 +675,7 @@ config ARM_PV_FIXUP

config ARM_THUMB
bool "Support Thumb user binaries" if !CPU_THUMBONLY && EXPERT
depends on CPU_THUMB_CAPABLE
depends on CPU_THUMB_CAPABLE && !CPU_32v4
default y
help
Say Y if you want to include kernel support for running user space
@@ -750,7 +750,7 @@ config CPU_BIG_ENDIAN
config CPU_ENDIAN_BE8
bool
depends on CPU_BIG_ENDIAN
default CPU_V6 || CPU_V6K || CPU_V7
default CPU_V6 || CPU_V6K || CPU_V7 || CPU_V7M
help
Support for the BE-8 (big-endian) mode on ARMv6 and ARMv7 processors.
|
||||
if (cmd != THREAD_NOTIFY_SWITCH)
|
||||
return NOTIFY_DONE;
|
||||
|
||||
pid = task_pid_nr(thread->task) << ASID_BITS;
|
||||
pid = task_pid_nr(thread_task(thread)) << ASID_BITS;
|
||||
asm volatile(
|
||||
" mrc p15, 0, %0, c13, c0, 1\n"
|
||||
" and %0, %0, %2\n"
|
||||
|
@@ -37,7 +37,6 @@ void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
if (!mm)
mm = &init_mm;

printk("%spgd = %p\n", lvl, mm->pgd);
pgd = pgd_offset(mm, addr);
printk("%s[%08lx] *pgd=%08llx", lvl, addr, (long long)pgd_val(*pgd));

@@ -100,6 +99,21 @@ void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
{ }
#endif /* CONFIG_MMU */

static void die_kernel_fault(const char *msg, struct mm_struct *mm,
unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
{
bust_spinlocks(1);
pr_alert("8<--- cut here ---\n");
pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
msg, addr);

show_pte(KERN_ALERT, mm, addr);
die("Oops", regs, fsr);
bust_spinlocks(0);
do_exit(SIGKILL);
}

/*
* Oops. The kernel tried to access some page that wasn't present.
*/
@@ -107,6 +121,7 @@ static void
__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
{
const char *msg;
/*
* Are we prepared to handle this kernel fault?
*/
@@ -116,16 +131,12 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
/*
* No handler, we'll have to terminate things with extreme prejudice.
*/
bust_spinlocks(1);
pr_alert("8<--- cut here ---\n");
pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
(addr < PAGE_SIZE) ? "NULL pointer dereference" :
"paging request", addr);
if (addr < PAGE_SIZE)
msg = "NULL pointer dereference";
else
msg = "paging request";

show_pte(KERN_ALERT, mm, addr);
die("Oops", regs, fsr);
bust_spinlocks(0);
do_exit(SIGKILL);
die_kernel_fault(msg, mm, addr, fsr, regs);
}

/*
@@ -183,73 +194,58 @@ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
#define VM_FAULT_BADMAP 0x010000
#define VM_FAULT_BADACCESS 0x020000

/*
* Check that the permissions on the VMA allow for the fault which occurred.
* If we encountered a write fault, we must have write permission, otherwise
* we allow any permission.
*/
static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
static inline bool is_permission_fault(unsigned int fsr)
{
unsigned int mask = VM_ACCESS_FLAGS;

if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
mask = VM_WRITE;
if (fsr & FSR_LNX_PF)
mask = VM_EXEC;

return vma->vm_flags & mask ? false : true;
int fs = fsr_fs(fsr);
#ifdef CONFIG_ARM_LPAE
if ((fs & FS_PERM_NOLL_MASK) == FS_PERM_NOLL)
return true;
#else
if (fs == FS_L1_PERM || fs == FS_L2_PERM)
return true;
#endif
return false;
}

static vm_fault_t __kprobes
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
unsigned int flags, struct task_struct *tsk,
struct pt_regs *regs)
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int flags,
unsigned long vma_flags, struct pt_regs *regs)
{
struct vm_area_struct *vma;
vm_fault_t fault;

vma = find_vma(mm, addr);
fault = VM_FAULT_BADMAP;
struct vm_area_struct *vma = find_vma(mm, addr);
if (unlikely(!vma))
goto out;
if (unlikely(vma->vm_start > addr))
goto check_stack;
return VM_FAULT_BADMAP;

/*
* Ok, we have a good vm_area for this
* memory access, so we can handle it.
*/
good_area:
if (access_error(fsr, vma)) {
fault = VM_FAULT_BADACCESS;
goto out;
if (unlikely(vma->vm_start > addr)) {
if (!(vma->vm_flags & VM_GROWSDOWN))
return VM_FAULT_BADMAP;
if (addr < FIRST_USER_ADDRESS)
return VM_FAULT_BADMAP;
if (expand_stack(vma, addr))
return VM_FAULT_BADMAP;
}

return handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
/*
* ok, we have a good vm_area for this memory access, check the
* permissions on the VMA allow for the fault which occurred.
*/
if (!(vma->vm_flags & vma_flags))
return VM_FAULT_BADACCESS;

check_stack:
/* Don't allow expansion below FIRST_USER_ADDRESS */
if (vma->vm_flags & VM_GROWSDOWN &&
addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr))
goto good_area;
out:
return fault;
return handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
}

static int __kprobes
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
struct task_struct *tsk;
struct mm_struct *mm;
struct mm_struct *mm = current->mm;
int sig, code;
vm_fault_t fault;
unsigned int flags = FAULT_FLAG_DEFAULT;
unsigned long vm_flags = VM_ACCESS_FLAGS;

if (kprobe_page_fault(regs, fsr))
return 0;

tsk = current;
mm = tsk->mm;

/* Enable interrupts if they were enabled in the parent context. */
if (interrupts_enabled(regs))
@@ -264,8 +260,19 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)

if (user_mode(regs))
flags |= FAULT_FLAG_USER;
if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))

if ((fsr & FSR_WRITE) && !(fsr & FSR_CM)) {
flags |= FAULT_FLAG_WRITE;
vm_flags = VM_WRITE;
}

if (fsr & FSR_LNX_PF) {
vm_flags = VM_EXEC;

if (is_permission_fault(fsr) && !user_mode(regs))
die_kernel_fault("execution of memory",
mm, addr, fsr, regs);
}

perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

@@ -293,7 +300,7 @@ retry:
#endif
}

fault = __do_page_fault(mm, addr, fsr, flags, tsk, regs);
fault = __do_page_fault(mm, addr, flags, vm_flags, regs);

/* If we need to retry but a fatal signal is pending, handle the
* signal first. We do not need to release the mmap_lock because
@@ -14,6 +14,8 @@

#ifdef CONFIG_ARM_LPAE
#define FSR_FS_AEA 17
#define FS_PERM_NOLL 0xC
#define FS_PERM_NOLL_MASK 0x3C

static inline int fsr_fs(unsigned int fsr)
{
@@ -21,6 +23,8 @@ static inline int fsr_fs(unsigned int fsr)
}
#else
#define FSR_FS_AEA 22
#define FS_L1_PERM 0xD
#define FS_L2_PERM 0xF

static inline int fsr_fs(unsigned int fsr)
{
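A small worked example of the permission-fault test these constants enable (standalone C, LPAE long-descriptor case only; the helper name is illustrative, not the in-tree one): a fault-status value matches when masking with FS_PERM_NOLL_MASK leaves FS_PERM_NOLL, i.e. values 0xC through 0xF, which are the LPAE permission-fault codes.

#include <assert.h>
#include <stdbool.h>

#define FS_PERM_NOLL		0xC
#define FS_PERM_NOLL_MASK	0x3C

/* fs is the fault-status field already extracted by fsr_fs(). */
static bool is_permission_fault_lpae(unsigned int fs)
{
	return (fs & FS_PERM_NOLL_MASK) == FS_PERM_NOLL;
}

int main(void)
{
	assert(is_permission_fault_lpae(0x0f));   /* level-3 permission fault */
	assert(!is_permission_fault_lpae(0x07));  /* translation fault */
	return 0;
}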
@@ -36,6 +36,7 @@
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/set_memory.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
@@ -401,6 +402,11 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
__builtin_return_address(0));
}

void __arm_iomem_set_ro(void __iomem *ptr, size_t size)
{
set_memory_ro((unsigned long)ptr, PAGE_ALIGN(size) / PAGE_SIZE);
}

void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
return (__force void *)arch_ioremap_caller(phys_addr, size,
@@ -226,7 +226,7 @@ void __init kasan_init(void)
BUILD_BUG_ON(pgd_index(KASAN_SHADOW_START) !=
pgd_index(KASAN_SHADOW_END));
memcpy(tmp_pmd_table,
pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_START)),
(void*)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_START)),
sizeof(tmp_pmd_table));
set_pgd(&tmp_pgd_table[pgd_index(KASAN_SHADOW_START)],
__pgd(__pa(tmp_pmd_table) | PMD_TYPE_TABLE | L_PGD_SWAPPER));
@@ -30,8 +30,7 @@
* act_mm - get current->active_mm
*/
.macro act_mm, rd
get_thread_info \rd
ldr \rd, [\rd, #TI_TASK]
get_current \rd
.if (TSK_ACTIVE_MM > IMM12_MASK)
add \rd, \rd, #TSK_ACTIVE_MM & ~IMM12_MASK
.endif
@@ -98,7 +98,7 @@ struct test_arg_end {
#if VERBOSE
#define verbose(fmt, ...) pr_info(fmt, ##__VA_ARGS__)
#else
#define verbose(fmt, ...)
#define verbose(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif

#define TEST_GROUP(title) \
@@ -19,6 +19,7 @@
#include <linux/clk/clk-conf.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/of_irq.h>

#include <asm/irq.h>

@@ -371,14 +372,37 @@ static void amba_device_release(struct device *dev)
kfree(d);
}

static int of_amba_device_decode_irq(struct amba_device *dev)
{
struct device_node *node = dev->dev.of_node;
int i, irq = 0;

if (IS_ENABLED(CONFIG_OF_IRQ) && node) {
/* Decode the IRQs and address ranges */
for (i = 0; i < AMBA_NR_IRQS; i++) {
irq = of_irq_get(node, i);
if (irq < 0) {
if (irq == -EPROBE_DEFER)
return irq;
irq = 0;
}

dev->irq[i] = irq;
}
}

return 0;
}

static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
{
u32 size;
void __iomem *tmp;
int i, ret;

WARN_ON(dev->irq[0] == (unsigned int)-1);
WARN_ON(dev->irq[1] == (unsigned int)-1);
ret = of_amba_device_decode_irq(dev);
if (ret)
goto err_out;

ret = request_resource(parent, &dev->res);
if (ret)
@@ -579,78 +603,6 @@ int amba_device_add(struct amba_device *dev, struct resource *parent)
}
EXPORT_SYMBOL_GPL(amba_device_add);

static struct amba_device *
amba_aphb_device_add(struct device *parent, const char *name,
resource_size_t base, size_t size, int irq1, int irq2,
void *pdata, unsigned int periphid, u64 dma_mask,
struct resource *resbase)
{
struct amba_device *dev;
int ret;

dev = amba_device_alloc(name, base, size);
if (!dev)
return ERR_PTR(-ENOMEM);

dev->dev.coherent_dma_mask = dma_mask;
dev->irq[0] = irq1;
dev->irq[1] = irq2;
dev->periphid = periphid;
dev->dev.platform_data = pdata;
dev->dev.parent = parent;

ret = amba_device_add(dev, resbase);
if (ret) {
amba_device_put(dev);
return ERR_PTR(ret);
}

return dev;
}

struct amba_device *
amba_apb_device_add(struct device *parent, const char *name,
resource_size_t base, size_t size, int irq1, int irq2,
void *pdata, unsigned int periphid)
{
return amba_aphb_device_add(parent, name, base, size, irq1, irq2, pdata,
periphid, 0, &iomem_resource);
}
EXPORT_SYMBOL_GPL(amba_apb_device_add);

struct amba_device *
amba_ahb_device_add(struct device *parent, const char *name,
resource_size_t base, size_t size, int irq1, int irq2,
void *pdata, unsigned int periphid)
{
return amba_aphb_device_add(parent, name, base, size, irq1, irq2, pdata,
periphid, ~0ULL, &iomem_resource);
}
EXPORT_SYMBOL_GPL(amba_ahb_device_add);

struct amba_device *
amba_apb_device_add_res(struct device *parent, const char *name,
resource_size_t base, size_t size, int irq1,
int irq2, void *pdata, unsigned int periphid,
struct resource *resbase)
{
return amba_aphb_device_add(parent, name, base, size, irq1, irq2, pdata,
periphid, 0, resbase);
}
EXPORT_SYMBOL_GPL(amba_apb_device_add_res);

struct amba_device *
amba_ahb_device_add_res(struct device *parent, const char *name,
resource_size_t base, size_t size, int irq1,
int irq2, void *pdata, unsigned int periphid,
struct resource *resbase)
{
return amba_aphb_device_add(parent, name, base, size, irq1, irq2, pdata,
periphid, ~0ULL, resbase);
}
EXPORT_SYMBOL_GPL(amba_ahb_device_add_res);

static void amba_device_initialize(struct amba_device *dev, const char *name)
{
device_initialize(&dev->dev);
@@ -222,7 +222,7 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
{
struct amba_device *dev;
const void *prop;
int i, ret;
int ret;

pr_debug("Creating amba device %pOF\n", node);

@@ -253,10 +253,6 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
if (prop)
dev->periphid = of_read_ulong(prop, 1);

/* Decode the IRQs and address ranges */
for (i = 0; i < AMBA_NR_IRQS; i++)
dev->irq[i] = irq_of_parse_and_map(node, i);

ret = of_address_to_resource(node, 0, &dev->res);
if (ret) {
pr_err("amba: of_address_to_resource() failed (%d) for %pOF\n",
@@ -122,24 +122,6 @@ struct amba_device *amba_device_alloc(const char *, resource_size_t, size_t);
void amba_device_put(struct amba_device *);
int amba_device_add(struct amba_device *, struct resource *);
int amba_device_register(struct amba_device *, struct resource *);
struct amba_device *amba_apb_device_add(struct device *parent, const char *name,
resource_size_t base, size_t size,
int irq1, int irq2, void *pdata,
unsigned int periphid);
struct amba_device *amba_ahb_device_add(struct device *parent, const char *name,
resource_size_t base, size_t size,
int irq1, int irq2, void *pdata,
unsigned int periphid);
struct amba_device *
amba_apb_device_add_res(struct device *parent, const char *name,
resource_size_t base, size_t size, int irq1,
int irq2, void *pdata, unsigned int periphid,
struct resource *resbase);
struct amba_device *
amba_ahb_device_add_res(struct device *parent, const char *name,
resource_size_t base, size_t size, int irq1,
int irq2, void *pdata, unsigned int periphid,
struct resource *resbase);
void amba_device_unregister(struct amba_device *);
struct amba_device *amba_find_device(const char *, struct device *, unsigned int, unsigned int);
int amba_request_regions(struct amba_device *, const char *);
@@ -4,7 +4,7 @@

__visible int plugin_is_GPL_compatible;

static unsigned int sp_mask, canary_offset;
static unsigned int canary_offset;

static unsigned int arm_pertask_ssp_rtl_execute(void)
{
@@ -13,7 +13,7 @@ static unsigned int arm_pertask_ssp_rtl_execute(void)
for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
const char *sym;
rtx body;
rtx mask, masked_sp;
rtx current;

/*
* Find a SET insn involving a SYMBOL_REF to __stack_chk_guard
@@ -30,19 +30,13 @@ static unsigned int arm_pertask_ssp_rtl_execute(void)

/*
* Replace the source of the SET insn with an expression that
* produces the address of the copy of the stack canary value
* stored in struct thread_info
* produces the address of the current task's stack canary value
*/
mask = GEN_INT(sext_hwi(sp_mask, GET_MODE_PRECISION(Pmode)));
masked_sp = gen_reg_rtx(Pmode);
current = gen_reg_rtx(Pmode);

emit_insn_before(gen_rtx_set(masked_sp,
gen_rtx_AND(Pmode,
stack_pointer_rtx,
mask)),
insn);
emit_insn_before(gen_load_tp_hard(current), insn);

SET_SRC(body) = gen_rtx_PLUS(Pmode, masked_sp,
SET_SRC(body) = gen_rtx_PLUS(Pmode, current,
GEN_INT(canary_offset));
}
return 0;
@@ -72,7 +66,6 @@ __visible int plugin_init(struct plugin_name_args *plugin_info,
const char * const plugin_name = plugin_info->base_name;
const int argc = plugin_info->argc;
const struct plugin_argument *argv = plugin_info->argv;
int tso = 0;
int i;

if (!plugin_default_version_check(version, &gcc_version)) {
@@ -91,11 +84,6 @@ __visible int plugin_init(struct plugin_name_args *plugin_info,
return 1;
}

if (!strcmp(argv[i].key, "tso")) {
tso = atoi(argv[i].value);
continue;
}

if (!strcmp(argv[i].key, "offset")) {
canary_offset = atoi(argv[i].value);
continue;
@@ -105,9 +93,6 @@ __visible int plugin_init(struct plugin_name_args *plugin_info,
return 1;
}

/* create the mask that produces the base of the stack */
sp_mask = ~((1U << (12 + tso)) - 1);

PASS_INFO(arm_pertask_ssp_rtl, "expand", 1, PASS_POS_INSERT_AFTER);

register_callback(plugin_info->base_name, PLUGIN_PASS_MANAGER_SETUP,
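In plain terms, the plugin change above stops deriving thread_info from a masked stack pointer and instead forms the canary address as "current task pointer (read from the TLS register) plus a constant offset". A minimal sketch of the address computation the generated code now performs, in standalone C with illustrative types (not the plugin or kernel code):

#include <assert.h>
#include <stddef.h>

struct task { int pid; unsigned long stack_canary; };

/* current_task stands in for the value loaded from the TLS register;
 * canary_offset stands in for the TSK_STACK_CANARY asm-offset. */
static unsigned long *canary_address(struct task *current_task,
				     size_t canary_offset)
{
	return (unsigned long *)((char *)current_task + canary_offset);
}

int main(void)
{
	struct task t = { .pid = 1, .stack_canary = 0xdeadbeef };

	assert(*canary_address(&t, offsetof(struct task, stack_canary))
	       == 0xdeadbeef);
	return 0;
}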