Al reminds us that the usercopy API must only return complete failure
if absolutely nothing could be copied. Currently, if userspace does
something silly like giving us an unaligned pointer to Device memory,
or a size which overruns MTE tag bounds, we may fail to honour that
requirement when faulting on a multi-byte access even though a smaller
access could have succeeded.

Add a mitigation to the fixup routines to fall back to a single-byte
copy if we faulted on a larger access before anything has been written
to the destination, to guarantee making *some* forward progress. We
needn't be too concerned about the overall performance since this
should only occur when callers are doing something a bit dodgy in the
first place. Particularly broken userspace might still be able to
trick generic_perform_write() into an infinite loop by targeting
write() at an mmap() of some read-only device register where the
fault-in load succeeds but any store synchronously aborts such that
copy_to_user() is genuinely unable to make progress, but, well, don't
do that...

CC: stable@vger.kernel.org
Reported-by: Chen Huang <chenhuang5@huawei.com>
Suggested-by: Al Viro <viro@zeniv.linux.org.uk>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Link: https://lore.kernel.org/r/dc03d5c675731a1f24a62417dba5429ad744234e.1626098433.git.robin.murphy@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
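To illustrate the contract in question, here is a minimal sketch of a
hypothetical caller (write_all() is invented for illustration, not
kernel source): the usercopy routines return the number of bytes *not*
copied, and callers loop on partial progress, treating "no progress at
all" as the only hard failure. A copy routine that reported total
failure while a single byte was still copyable would therefore make
loops like this return -EFAULT spuriously:

	static ssize_t write_all(char __user *dst, const char *src, size_t len)
	{
		size_t done = 0;

		while (done < len) {
			size_t left = copy_to_user(dst + done, src + done,
						   len - done);

			if (left == len - done) {
				/* no forward progress at all: genuine fault */
				return done ? (ssize_t)done : -EFAULT;
			}
			done += (len - done) - left;	/* partial: keep going */
		}
		return done;
	}

The fixup added below preserves this forward-progress property on
arm64 by retrying a single byte before declaring total failure.
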
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copy from user space to user space
 *
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/linkage.h>

#include <asm/asm-uaccess.h>
#include <asm/assembler.h>
#include <asm/cache.h>

/*
 * Copy from user space to user space (alignment handled by the hardware)
 *
 * Parameters:
 *	x0 - to
 *	x1 - from
 *	x2 - n
 * Returns:
 *	x0 - bytes not copied
 */
	.macro ldrb1 reg, ptr, val
	user_ldst 9998f, ldtrb, \reg, \ptr, \val
	.endm

	.macro strb1 reg, ptr, val
	user_ldst 9998f, sttrb, \reg, \ptr, \val
	.endm

	.macro ldrh1 reg, ptr, val
	user_ldst 9997f, ldtrh, \reg, \ptr, \val
	.endm

	.macro strh1 reg, ptr, val
	user_ldst 9997f, sttrh, \reg, \ptr, \val
	.endm

	.macro ldr1 reg, ptr, val
	user_ldst 9997f, ldtr, \reg, \ptr, \val
	.endm

	.macro str1 reg, ptr, val
	user_ldst 9997f, sttr, \reg, \ptr, \val
	.endm

	.macro ldp1 reg1, reg2, ptr, val
	user_ldp 9997f, \reg1, \reg2, \ptr, \val
	.endm

	.macro stp1 reg1, reg2, ptr, val
	user_stp 9997f, \reg1, \reg2, \ptr, \val
	.endm

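	// srcin (x15) is not clobbered by copy_template.S, so the fixup
	// below can reload the first byte from the original source address.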
end	.req	x5
srcin	.req	x15
SYM_FUNC_START(__arch_copy_in_user)
	add	end, x0, x2
	mov	srcin, x1
#include "copy_template.S"
	mov	x0, #0
	ret
SYM_FUNC_END(__arch_copy_in_user)
EXPORT_SYMBOL(__arch_copy_in_user)

.section .fixup,"ax"
|
|
.align 2
|
|
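	// Multi-byte accesses take their fault fixup at 9997f, where the
	// single-byte fallback below may still make progress; byte-sized
	// accesses go straight to 9998f, since no smaller access exists.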
9997:	cmp	dst, dstin
	b.ne	9998f
	// Before being absolutely sure we couldn't copy anything, try harder
USER(9998f, ldtrb tmp1w, [srcin])
USER(9998f, sttrb tmp1w, [dst])
	add	dst, dst, #1
9998:	sub	x0, end, dst			// bytes not copied
	ret
	.previous