2012-03-05 11:49:32 +00:00
|
|
|
/*
|
|
|
|
* Based on arch/arm/include/asm/uaccess.h
|
|
|
|
*
|
|
|
|
* Copyright (C) 2012 ARM Ltd.
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
*/
|
|
|
|
#ifndef __ASM_UACCESS_H
|
|
|
|
#define __ASM_UACCESS_H
|
|
|
|
|
2016-07-01 13:58:21 +00:00
|
|
|
#include <asm/alternative.h>
|
2016-07-01 15:53:00 +00:00
|
|
|
#include <asm/kernel-pgtable.h>
|
2016-07-01 13:58:21 +00:00
|
|
|
#include <asm/sysreg.h>
|
|
|
|
|
2012-03-05 11:49:32 +00:00
|
|
|
/*
|
|
|
|
* User space memory access functions
|
|
|
|
*/
|
2016-10-19 13:40:54 +00:00
|
|
|
#include <linux/bitops.h>
|
2016-06-08 21:40:56 +00:00
|
|
|
#include <linux/kasan-checks.h>
|
2012-03-05 11:49:32 +00:00
|
|
|
#include <linux/string.h>
|
|
|
|
|
2015-07-22 18:05:54 +00:00
|
|
|
#include <asm/cpufeature.h>
|
2012-03-05 11:49:32 +00:00
|
|
|
#include <asm/ptrace.h>
|
|
|
|
#include <asm/memory.h>
|
2016-12-25 19:00:03 +00:00
|
|
|
#include <asm/extable.h>
|
2012-03-05 11:49:32 +00:00
|
|
|
|
|
|
|
/* Kernel-space segment descriptor: the whole kernel address range */
#define get_ds()	(KERNEL_DS)
/* Current task's user/kernel address space limit */
#define get_fs()	(current_thread_info()->addr_limit)
|
|
|
|
|
|
|
|
/*
 * Set the current task's address space limit (KERNEL_DS or USER_DS).
 * The new limit is re-validated on return to user-space via TIF_FSCHECK.
 */
static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;

	/*
	 * Prevent a mispredicted conditional call to set_fs from forwarding
	 * the wrong address limit to access_ok under speculation.
	 */
	dsb(nsh);
	isb();

	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);

	/*
	 * Enable/disable UAO so that copy_to_user() etc can access
	 * kernel memory with the unprivileged instructions.
	 */
	if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
	else
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
				CONFIG_ARM64_UAO));
}
|
|
|
|
|
2015-01-06 13:11:13 +00:00
|
|
|
/* True if two mm_segment_t limits describe the same segment */
#define segment_eq(a, b)	((a) == (b))
|
2012-03-05 11:49:32 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Test whether a block of memory is a valid user space address.
|
|
|
|
* Returns 1 if the range is valid, 0 otherwise.
|
|
|
|
*
|
|
|
|
* This is equivalent to the following test:
|
2018-02-05 15:34:18 +00:00
|
|
|
* (u65)addr + (u65)size <= (u65)current->addr_limit + 1
|
2012-03-05 11:49:32 +00:00
|
|
|
*/
|
2018-02-19 13:38:00 +00:00
|
|
|
/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
 *
 * Implemented branch-free in assembly so the result cannot be
 * mispredicted and abused under speculative execution.
 */
static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
{
	unsigned long ret, limit = current_thread_info()->addr_limit;

	__chk_user_ptr(addr);
	asm volatile(
	// A + B <= C + 1 for all A,B,C, in four easy steps:
	// 1: X = A + B; X' = X % 2^64
	"	adds	%0, %3, %2\n"
	// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
	"	csel	%1, xzr, %1, hi\n"
	// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
	//    to compensate for the carry flag being set in step 4. For
	//    X > 2^64, X' merely has to remain nonzero, which it does.
	"	csinv	%0, %0, xzr, cc\n"
	// 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
	//    comes from the carry in being clear. Otherwise, we are
	//    testing X' - C == 0, subject to the previous adjustments.
	"	sbcs	xzr, %0, %1\n"
	"	cset	%0, ls\n"
	: "=&r" (ret), "+r" (limit) : "Ir" (size), "0" (addr) : "cc");

	return ret;
}
|
2012-03-05 11:49:32 +00:00
|
|
|
|
2016-10-19 13:40:54 +00:00
|
|
|
/*
 * When dealing with data aborts, watchpoints, or instruction traps we may end
 * up with a tagged userland pointer. Clear the tag to get a sane pointer to
 * pass on to access_ok(), for instance.
 */
#define untagged_addr(addr)		sign_extend64(addr, 55)

/* The "type" argument is ignored on arm64; only the range matters */
#define access_ok(type, addr, size)	__range_ok(addr, size)
/* Highest valid user address is the current fs limit */
#define user_addr_max			get_fs
|
2012-03-05 11:49:32 +00:00
|
|
|
|
2016-01-01 14:02:12 +00:00
|
|
|
/*
 * Emit an exception-table entry pairing a potentially-faulting instruction
 * ("from") with its fixup handler ("to").  Both are stored as 32-bit
 * PC-relative offsets, matching the entry layout from <asm/extable.h>.
 */
#define _ASM_EXTABLE(from, to)						\
	"	.pushsection	__ex_table, \"a\"\n"			\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"
|
|
|
|
|
2016-07-01 13:58:21 +00:00
|
|
|
/*
|
|
|
|
* User access enabling/disabling.
|
|
|
|
*/
|
2016-07-01 15:53:00 +00:00
|
|
|
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
|
|
|
|
/*
 * Block user-space accesses (software PAN): point TTBR0_EL1 at the
 * zeroed reserved page table and install the reserved ASID in TTBR1_EL1.
 * IRQs are disabled so a context switch cannot observe the intermediate
 * sysreg state.
 */
static inline void __uaccess_ttbr0_disable(void)
{
	unsigned long flags, ttbr;

	local_irq_save(flags);
	ttbr = read_sysreg(ttbr1_el1);
	ttbr &= ~TTBR_ASID_MASK;
	/* reserved_ttbr0 placed before swapper_pg_dir */
	write_sysreg(ttbr - RESERVED_TTBR0_SIZE, ttbr0_el1);
	isb();
	/* Set reserved ASID */
	write_sysreg(ttbr, ttbr1_el1);
	isb();
	local_irq_restore(flags);
}
|
|
|
|
|
|
|
|
/*
 * Re-allow user-space accesses (software PAN): restore the task's saved
 * TTBR0 value and its ASID into TTBR1_EL1.
 */
static inline void __uaccess_ttbr0_enable(void)
{
	unsigned long flags, ttbr0, ttbr1;

	/*
	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
	 * variable and the MSR. A context switch could trigger an ASID
	 * roll-over and an update of 'ttbr0'.
	 */
	local_irq_save(flags);
	ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

	/* Restore active ASID */
	ttbr1 = read_sysreg(ttbr1_el1);
	ttbr1 &= ~TTBR_ASID_MASK;		/* safety measure */
	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
	write_sysreg(ttbr1, ttbr1_el1);
	isb();

	/* Restore user page table */
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	local_irq_restore(flags);
}
|
|
|
|
|
|
|
|
/*
 * Disable TTBR0-based user access if software PAN is in use.
 * Returns true when the TTBR0 switch was actually performed, so the
 * caller knows whether a matching uaccess_ttbr0_enable() had effect.
 */
static inline bool uaccess_ttbr0_disable(void)
{
	bool sw_pan_active = system_uses_ttbr0_pan();

	if (sw_pan_active)
		__uaccess_ttbr0_disable();

	return sw_pan_active;
}
|
|
|
|
|
|
|
|
/*
 * Re-enable TTBR0-based user access if software PAN is in use.
 * Returns true when the TTBR0 switch was actually performed.
 */
static inline bool uaccess_ttbr0_enable(void)
{
	bool sw_pan_active = system_uses_ttbr0_pan();

	if (sw_pan_active)
		__uaccess_ttbr0_enable();

	return sw_pan_active;
}
|
|
|
|
#else
/* CONFIG_ARM64_SW_TTBR0_PAN=n: software PAN never active, nothing to do */
static inline bool uaccess_ttbr0_disable(void)
{
	return false;
}

static inline bool uaccess_ttbr0_enable(void)
{
	return false;
}
#endif
|
|
|
|
|
2018-01-08 15:38:11 +00:00
|
|
|
/* Clear PSTATE.PAN (patched to a nop on CPUs without ARMv8.1 PAN) */
static inline void __uaccess_disable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}
|
|
|
|
|
|
|
|
/* Set PSTATE.PAN (patched to a nop on CPUs without ARMv8.1 PAN) */
static inline void __uaccess_enable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}
|
|
|
|
|
2016-07-01 13:58:21 +00:00
|
|
|
/*
 * Disable user access: prefer the TTBR0 switch (software PAN); otherwise
 * set PSTATE.PAN via the "alt" alternatives capability.
 */
#define __uaccess_disable(alt)						\
do {									\
	if (!uaccess_ttbr0_disable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)
|
|
|
|
|
|
|
|
/*
 * Enable user access: prefer the TTBR0 switch (software PAN); otherwise
 * clear PSTATE.PAN via the "alt" alternatives capability.
 */
#define __uaccess_enable(alt)						\
do {									\
	if (!uaccess_ttbr0_enable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)
|
|
|
|
|
|
|
|
/* Unconditionally disable user access wherever PAN is implemented */
static inline void uaccess_disable(void)
{
	__uaccess_disable(ARM64_HAS_PAN);
}
|
|
|
|
|
|
|
|
/* Unconditionally enable user access wherever PAN is implemented */
static inline void uaccess_enable(void)
{
	__uaccess_enable(ARM64_HAS_PAN);
}
|
|
|
|
|
|
|
|
/*
 * These functions are no-ops when UAO is present.
 */
static inline void uaccess_disable_not_uao(void)
{
	__uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
}
|
|
|
|
|
|
|
|
/* No-op when UAO is present; otherwise clears PAN around uaccess */
static inline void uaccess_enable_not_uao(void)
{
	__uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
}
|
|
|
|
|
2018-02-05 15:34:19 +00:00
|
|
|
/*
 * Sanitise a uaccess pointer such that it becomes NULL if above the
 * current addr_limit.  Used as a Spectre-v1 defence: the mask is computed
 * with data-dependent (branch-free) instructions so a mispredicted
 * access_ok() cannot leak an out-of-range pointer to the access itself.
 */
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
{
	void __user *safe_ptr;

	asm volatile(
	/* NZCV.Z set iff no bit of ptr lies above addr_limit */
	"	bics	xzr, %1, %2\n"
	/* select ptr when in range, NULL otherwise */
	"	csel	%0, %1, xzr, eq\n"
	: "=&r" (safe_ptr)
	: "r" (ptr), "r" (current_thread_info()->addr_limit)
	: "cc");

	/* speculation barrier: ensure the csel result is architectural */
	csdb();
	return safe_ptr;
}
|
|
|
|
|
2012-03-05 11:49:32 +00:00
|
|
|
/*
|
|
|
|
* The "__xxx" versions of the user access functions do not verify the address
|
|
|
|
* space - it must have been done previously with a separate "access_ok()"
|
|
|
|
* call.
|
|
|
|
*
|
|
|
|
* The "__xxx_error" versions set the third argument to -EFAULT if an error
|
|
|
|
* occurs, and leave it unchanged on success.
|
|
|
|
*/
|
2016-02-05 14:58:48 +00:00
|
|
|
/*
 * Load a single value from user memory into "x".
 * "instr" is the privileged load; "alt_instr" is the unprivileged
 * (ldtr*) form patched in when "feature" (e.g. ARM64_HAS_UAO) is set.
 * On a fault the extable fixup sets err = -EFAULT and x = 0.
 */
#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr "	" reg "1, [%2]\n",			\
			alt_instr "	" reg "1, [%2]\n", feature)	\
	"2:\n"								\
	"	.section .fixup, \"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	mov	%1, #0\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr), "i" (-EFAULT))
|
|
|
|
|
2018-10-10 15:55:44 +00:00
|
|
|
/*
 * Fetch *(ptr) from user space into "x" with the uaccess window open.
 * Dispatches on sizeof(*(ptr)); unsupported sizes are a build error.
 * The 8-byte case uses the "%x" register modifier to avoid a spurious
 * clang operand-size warning in unreachable switch arms.
 */
#define __get_user_err(x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)
|
|
|
|
|
2018-10-10 15:55:44 +00:00
|
|
|
/*
 * Validated user read: range-check the pointer, mask it against
 * speculation, then load.  On failure x is zeroed and err = -EFAULT.
 */
#define __get_user_check(x, ptr, err)					\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(VERIFY_READ, __p, sizeof(*__p))) {		\
		__p = uaccess_mask_ptr(__p);				\
		__get_user_err((x), __p, (err));			\
	} else {							\
		(x) = 0; (err) = -EFAULT;				\
	}								\
})
|
|
|
|
|
|
|
|
/* Like __get_user_check() but evaluates to void (err passed by caller) */
#define __get_user_error(x, ptr, err)					\
({									\
	__get_user_check((x), (ptr), (err));				\
	(void)0;							\
})
|
|
|
|
|
2018-02-05 15:34:22 +00:00
|
|
|
/* Validated user read; evaluates to 0 on success or -EFAULT on failure */
#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_check((x), (ptr), __gu_err);				\
	__gu_err;							\
})

/* On arm64 the checked and unchecked forms are identical */
#define get_user	__get_user
|
|
|
|
|
2016-02-05 14:58:48 +00:00
|
|
|
/*
 * Store a single value "x" to user memory.
 * "instr" is the privileged store; "alt_instr" is the unprivileged
 * (sttr*) form patched in when "feature" (e.g. ARM64_HAS_UAO) is set.
 * On a fault the extable fixup sets err = -EFAULT.
 */
#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr "	" reg "1, [%2]\n",			\
			alt_instr "	" reg "1, [%2]\n", feature)	\
	"2:\n"								\
	"	.section .fixup,\"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err)							\
	: "r" (x), "r" (addr), "i" (-EFAULT))
|
|
|
|
|
2018-10-10 15:55:44 +00:00
|
|
|
/*
 * Store "x" to *(ptr) in user space with the uaccess window open.
 * Dispatches on sizeof(*(ptr)); unsupported sizes are a build error.
 * The 8-byte case uses the "%x" register modifier to avoid a spurious
 * clang operand-size warning in unreachable switch arms.
 */
#define __put_user_err(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__put_user_asm("str", "sttr", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__put_user_asm("str", "sttr", "%x", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
} while (0)
|
|
|
|
|
2018-10-10 15:55:44 +00:00
|
|
|
/*
 * Validated user write: range-check the pointer, mask it against
 * speculation, then store.  On failure err = -EFAULT.
 */
#define __put_user_check(x, ptr, err)					\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) {		\
		__p = uaccess_mask_ptr(__p);				\
		__put_user_err((x), __p, (err));			\
	} else	{							\
		(err) = -EFAULT;					\
	}								\
})
|
|
|
|
|
|
|
|
/* Like __put_user_check() but evaluates to void (err passed by caller) */
#define __put_user_error(x, ptr, err)					\
({									\
	__put_user_check((x), (ptr), (err));				\
	(void)0;							\
})
|
|
|
|
|
2018-02-05 15:34:22 +00:00
|
|
|
/* Validated user write; evaluates to 0 on success or -EFAULT on failure */
#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_check((x), (ptr), __pu_err);				\
	__pu_err;							\
})

/* On arm64 the checked and unchecked forms are identical */
#define put_user	__put_user
|
|
|
|
|
2016-06-08 21:40:56 +00:00
|
|
|
extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
/* Copy from user space; the source pointer is speculation-masked first */
#define raw_copy_from_user(to, from, n)					\
({									\
	__arch_copy_from_user((to), __uaccess_mask_ptr(from), (n));	\
})
|
|
|
|
|
2016-06-08 21:40:56 +00:00
|
|
|
extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
/* Copy to user space; the destination pointer is speculation-masked first */
#define raw_copy_to_user(to, from, n)					\
({									\
	__arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n));	\
})
|
|
|
|
|
|
|
|
extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
/* Copy between two user buffers; both pointers are speculation-masked */
#define raw_copy_in_user(to, from, n)					\
({									\
	__arch_copy_in_user(__uaccess_mask_ptr(to),			\
			    __uaccess_mask_ptr(from), (n));		\
})

/* Ask the generic uaccess layer to inline the copy wrappers */
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
|
2012-03-05 11:49:32 +00:00
|
|
|
|
2018-02-05 15:34:23 +00:00
|
|
|
extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
|
|
|
|
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
|
2012-03-05 11:49:32 +00:00
|
|
|
{
|
|
|
|
if (access_ok(VERIFY_WRITE, to, n))
|
2018-02-05 15:34:23 +00:00
|
|
|
n = __arch_clear_user(__uaccess_mask_ptr(to), n);
|
2012-03-05 11:49:32 +00:00
|
|
|
return n;
|
|
|
|
}
|
2018-02-05 15:34:23 +00:00
|
|
|
#define clear_user __clear_user
|
2012-03-05 11:49:32 +00:00
|
|
|
|
2013-11-06 17:20:22 +00:00
|
|
|
extern long strncpy_from_user(char *dest, const char __user *src, long count);
|
2012-03-05 11:49:32 +00:00
|
|
|
|
2013-11-06 17:20:22 +00:00
|
|
|
extern __must_check long strnlen_user(const char __user *str, long n);
|
2012-03-05 11:49:32 +00:00
|
|
|
|
2017-07-25 10:55:43 +00:00
|
|
|
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
|
|
|
|
struct page;
|
|
|
|
void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
|
|
|
|
extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);
|
|
|
|
|
|
|
|
static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
|
|
|
|
{
|
|
|
|
kasan_check_write(dst, size);
|
2018-02-05 15:34:23 +00:00
|
|
|
return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
|
2017-07-25 10:55:43 +00:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2012-03-05 11:49:32 +00:00
|
|
|
#endif /* __ASM_UACCESS_H */
|