/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>

/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>

#include <asm/cpufeature.h>
#include <asm/mmu.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
#include <asm/extable.h>

#define HAVE_GET_KERNEL_NOFAULT

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= (u65)TASK_SIZE_MAX
 */
static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
{
	unsigned long ret, limit = TASK_SIZE_MAX - 1;

	/*
	 * Asynchronous I/O running in a kernel thread does not have the
	 * TIF_TAGGED_ADDR flag of the process owning the mm, so always untag
	 * the user address before checking.
	 */
	if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
	    (current->flags & PF_KTHREAD || test_thread_flag(TIF_TAGGED_ADDR)))
		addr = untagged_addr(addr);

	__chk_user_ptr(addr);
	asm volatile(
	// A + B <= C + 1 for all A,B,C, in four easy steps:
	// 1: X = A + B; X' = X % 2^64
	"	adds	%0, %3, %2\n"
	// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
	"	csel	%1, xzr, %1, hi\n"
	// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
	//    to compensate for the carry flag being set in step 4. For
	//    X > 2^64, X' merely has to remain nonzero, which it does.
	"	csinv	%0, %0, xzr, cc\n"
	// 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
	//    comes from the carry in being clear. Otherwise, we are
	//    testing X' - C == 0, subject to the previous adjustments.
	"	sbcs	xzr, %0, %1\n"
	"	cset	%0, ls\n"
	: "=&r" (ret), "+r" (limit) : "Ir" (size), "0" (addr) : "cc");

	return ret;
}
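
/*
 * Illustrative model (not part of this header): the asm above computes the
 * u65 test without branches and without a wider type. With a hypothetical
 * 128-bit helper it would read:
 *
 *	static unsigned long range_ok_model(unsigned long addr,
 *					    unsigned long size)
 *	{
 *		unsigned __int128 end = (unsigned __int128)addr + size;
 *
 *		return end <= (unsigned __int128)TASK_SIZE_MAX;
 *	}
 */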

#define access_ok(addr, size)	__range_ok(addr, size)
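
/*
 * Example (hypothetical caller; 'uptr' is an assumed u32 __user * argument):
 * access_ok() only validates the range - the access itself must still go
 * through a fault-handling routine such as __get_user():
 *
 *	u32 val;
 *
 *	if (!access_ok(uptr, sizeof(val)))
 *		return -EFAULT;
 *	if (__get_user(val, uptr))
 *		return -EFAULT;
 */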

#define _ASM_EXTABLE(from, to)						\
	"	.pushsection	__ex_table, \"a\"\n"			\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"

/*
 * User access enabling/disabling.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void __uaccess_ttbr0_disable(void)
{
	unsigned long flags, ttbr;

	local_irq_save(flags);
	ttbr = read_sysreg(ttbr1_el1);
	ttbr &= ~TTBR_ASID_MASK;
	/* reserved_pg_dir placed before swapper_pg_dir */
	write_sysreg(ttbr - PAGE_SIZE, ttbr0_el1);
	isb();
	/* Set reserved ASID */
	write_sysreg(ttbr, ttbr1_el1);
	isb();
	local_irq_restore(flags);
}

static inline void __uaccess_ttbr0_enable(void)
{
	unsigned long flags, ttbr0, ttbr1;

	/*
	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
	 * variable and the MSR. A context switch could trigger an ASID
	 * roll-over and an update of 'ttbr0'.
	 */
	local_irq_save(flags);
	ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

	/* Restore active ASID */
	ttbr1 = read_sysreg(ttbr1_el1);
	ttbr1 &= ~TTBR_ASID_MASK;	/* safety measure */
	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
	write_sysreg(ttbr1, ttbr1_el1);
	isb();

	/* Restore user page table */
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	local_irq_restore(flags);
}

static inline bool uaccess_ttbr0_disable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_disable();
	return true;
}

static inline bool uaccess_ttbr0_enable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_enable();
	return true;
}
#else
static inline bool uaccess_ttbr0_disable(void)
{
	return false;
}

static inline bool uaccess_ttbr0_enable(void)
{
	return false;
}
#endif
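
/*
 * Sketch of the intended bracketing (see the real uses in __raw_get_user()
 * and __raw_put_user() below): on a SW PAN kernel, every window of user
 * access restores the user page tables in TTBR0 for its duration:
 *
 *	uaccess_ttbr0_enable();
 *	... unprivileged user load/store ...
 *	uaccess_ttbr0_disable();
 */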

static inline void __uaccess_disable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

static inline void __uaccess_enable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

/*
 * The Tag Check Flag (TCF) mode for MTE is per EL, hence TCF0
 * affects EL0 and TCF affects EL1 irrespective of which TTBR is
 * used.
 * The kernel accesses TTBR0 usually with LDTR/STTR instructions
 * when UAO is available, so these would act as EL0 accesses using
 * TCF0.
 * However, the futex.h code uses exclusives, which are executed as
 * EL1 accesses; these can potentially cause a tag check fault even
 * if the user disables TCF0.
 *
 * To address the problem we set the PSTATE.TCO bit in uaccess_enable()
 * and reset it in uaccess_disable().
 *
 * The Tag Check Override (TCO) bit temporarily disables tag checking,
 * preventing the issue.
 */
static inline void uaccess_disable_privileged(void)
{
	asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(0),
				 ARM64_MTE, CONFIG_KASAN_HW_TAGS));

	if (uaccess_ttbr0_disable())
		return;

	__uaccess_enable_hw_pan();
}

static inline void uaccess_enable_privileged(void)
{
	asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
				 ARM64_MTE, CONFIG_KASAN_HW_TAGS));

	if (uaccess_ttbr0_enable())
		return;

	__uaccess_disable_hw_pan();
}
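
/*
 * Illustrative use (the surrounding function is hypothetical, modelled on
 * the futex code mentioned above): callers that need ordinary EL1
 * loads/stores or exclusives on user memory - rather than LDTR/STTR -
 * bracket them with the _privileged variants:
 *
 *	uaccess_enable_privileged();
 *	... ldxr/stlxr loop on the user address ...
 *	uaccess_disable_privileged();
 */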

/*
 * Sanitise a uaccess pointer such that it becomes NULL if above the maximum
 * user address. In case the pointer is tagged (has the top byte set), untag
 * the pointer before checking.
 */
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
{
	void __user *safe_ptr;

	asm volatile(
	"	bics	xzr, %3, %2\n"
	"	csel	%0, %1, xzr, eq\n"
	: "=&r" (safe_ptr)
	: "r" (ptr), "r" (TASK_SIZE_MAX - 1),
	  "r" (untagged_addr(ptr))
	: "cc");

	csdb();
	return safe_ptr;
}
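
/*
 * C-level model of the asm above (illustrative only): the untagged pointer
 * is checked against the limit and replaced with NULL rather than branched
 * on, so a mispredicted bounds check cannot forward an out-of-range pointer
 * to a dependent access:
 *
 *	if (untagged_addr(ptr) & ~(TASK_SIZE_MAX - 1))
 *		safe_ptr = NULL;
 *	else
 *		safe_ptr = (void __user *)ptr;
 *
 * The csdb() then acts as a speculation barrier for the result.
 */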

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
#define __get_mem_asm(load, reg, x, addr, err)				\
	asm volatile(							\
	"1:	" load "	" reg "1, [%2]\n"			\
	"2:\n"								\
	"	.section .fixup, \"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	mov	%1, #0\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr), "i" (-EFAULT))

#define __raw_get_mem(ldr, x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err));	\
		break;							\
	case 2:								\
		__get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err));	\
		break;							\
	case 4:								\
		__get_mem_asm(ldr, "%w", __gu_val, (ptr), (err));	\
		break;							\
	case 8:								\
		__get_mem_asm(ldr, "%x", __gu_val, (ptr), (err));	\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)

#define __raw_get_user(x, ptr, err)					\
do {									\
	__chk_user_ptr(ptr);						\
	uaccess_ttbr0_enable();						\
	__raw_get_mem("ldtr", x, ptr, err);				\
	uaccess_ttbr0_disable();					\
} while (0)

#define __get_user_error(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__raw_get_user((x), __p, (err));			\
	} else {							\
		(x) = (__force __typeof__(x))0; (err) = -EFAULT;	\
	}								\
} while (0)

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_error((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define get_user	__get_user
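
/*
 * Example (hypothetical ioctl handler; 'argp' is an assumed int __user *):
 * get_user() bundles the access_ok() check, pointer masking and the
 * faulting load, returning 0 on success or -EFAULT:
 *
 *	int val;
 *
 *	if (get_user(val, argp))
 *		return -EFAULT;
 */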

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __gkn_err = 0;						\
									\
	__raw_get_mem("ldr", *((type *)(dst)),				\
		      (__force type *)(src), __gkn_err);		\
	if (unlikely(__gkn_err))					\
		goto err_label;						\
} while (0)
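
/*
 * Illustrative use (the caller, 'kaddr' and the label are hypothetical):
 * this is the arch backend for copy_from_kernel_nofault(), reading a
 * possibly-unmapped kernel address without an oops, branching to the
 * label on a fault:
 *
 *	long val;
 *
 *	__get_kernel_nofault(&val, kaddr, long, fault);
 *	return val;
 * fault:
 *	return -EFAULT;
 */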

#define __put_mem_asm(store, reg, x, addr, err)				\
	asm volatile(							\
	"1:	" store "	" reg "1, [%2]\n"			\
	"2:\n"								\
	"	.section .fixup,\"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err)							\
	: "r" (x), "r" (addr), "i" (-EFAULT))

#define __raw_put_mem(str, x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_mem_asm(str "b", "%w", __pu_val, (ptr), (err));	\
		break;							\
	case 2:								\
		__put_mem_asm(str "h", "%w", __pu_val, (ptr), (err));	\
		break;							\
	case 4:								\
		__put_mem_asm(str, "%w", __pu_val, (ptr), (err));	\
		break;							\
	case 8:								\
		__put_mem_asm(str, "%x", __pu_val, (ptr), (err));	\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
} while (0)

#define __raw_put_user(x, ptr, err)					\
do {									\
	__chk_user_ptr(ptr);						\
	uaccess_ttbr0_enable();						\
	__raw_put_mem("sttr", x, ptr, err);				\
	uaccess_ttbr0_disable();					\
} while (0)

#define __put_user_error(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__raw_put_user((x), __p, (err));			\
	} else {							\
		(err) = -EFAULT;					\
	}								\
} while (0)

#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_error((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define put_user	__put_user
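
/*
 * Example (hypothetical caller; 'argp' is an assumed int __user *): the
 * store-side counterpart of get_user() above, likewise returning 0 on
 * success or -EFAULT:
 *
 *	if (put_user(ret, argp))
 *		return -EFAULT;
 */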

#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __pkn_err = 0;						\
									\
	__raw_put_mem("str", *((type *)(src)),				\
		      (__force type *)(dst), __pkn_err);		\
	if (unlikely(__pkn_err))					\
		goto err_label;						\
} while (0)

extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
#define raw_copy_from_user(to, from, n)					\
({									\
	unsigned long __acfu_ret;					\
	uaccess_ttbr0_enable();						\
	__acfu_ret = __arch_copy_from_user((to),			\
				      __uaccess_mask_ptr(from), (n));	\
	uaccess_ttbr0_disable();					\
	__acfu_ret;							\
})

extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
#define raw_copy_to_user(to, from, n)					\
({									\
	unsigned long __actu_ret;					\
	uaccess_ttbr0_enable();						\
	__actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),	\
					 (from), (n));			\
	uaccess_ttbr0_disable();					\
	__actu_ret;							\
})

extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
#define raw_copy_in_user(to, from, n)					\
({									\
	unsigned long __aciu_ret;					\
	uaccess_ttbr0_enable();						\
	__aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to),	\
					 __uaccess_mask_ptr(from), (n));\
	uaccess_ttbr0_disable();					\
	__aciu_ret;							\
})
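
/*
 * Sketch (hypothetical driver code; 'my_args' and 'uptr' are assumed):
 * the raw_copy_*() helpers above are not called directly - the generic
 * copy_{from,to}_user() wrappers in <linux/uaccess.h> layer the size
 * checks on top of them:
 *
 *	struct my_args args;
 *
 *	if (copy_from_user(&args, uptr, sizeof(args)))
 *		return -EFAULT;
 */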

#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n)) {
		uaccess_ttbr0_enable();
		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
		uaccess_ttbr0_disable();
	}
	return n;
}
#define clear_user	__clear_user
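
/*
 * Example (assumed caller zeroing the tail of a short copy; 'ubuf',
 * 'copied' and 'len' are hypothetical): clear_user() returns the number
 * of bytes that could not be cleared, so non-zero means failure:
 *
 *	if (clear_user(ubuf + copied, len - copied))
 *		return -EFAULT;
 */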

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
struct page;
void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);

static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
}
#endif

#endif /* __ASM_UACCESS_H */