commit 42a886af72
Most users by far do not care about the exact return value (they only
really care about whether the copy succeeded in its entirety or not),
but a few special core routines actually care deeply about exactly how
many bytes were copied from user space.
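[ For illustration (not part of the patch): the two caller patterns in a
  minimal sketch.  grab_all() and grab_some() are invented names; the only
  real interface used is copy_from_user(), which returns the number of
  bytes it could not copy. ]

#include <linux/uaccess.h>

/* Most callers: only complete success matters. */
static int grab_all(void *dst, const void __user *src, unsigned long len)
{
	if (copy_from_user(dst, src, len))	/* non-zero => bytes were left */
		return -EFAULT;
	return 0;
}

/* The few callers that care: exactly how far did the copy get? */
static unsigned long grab_some(void *dst, const void __user *src,
			       unsigned long len)
{
	unsigned long left = copy_from_user(dst, src, len);

	return len - left;			/* bytes actually copied */
}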
And the unrolled versions of the x86-64 user copy routines would
sometimes report that they had copied more bytes than they actually had.
Very few uses actually have partial copies to begin with, but to make
this bug even harder to trigger, most x86 CPUs use the "rep string"
instructions for normal user copies, and that version didn't have this
issue.
To make it even harder to hit, the one user of this that really cared
about the return value (and used the uncached version of the copy that
doesn't use the "rep string" instructions) was the generic write
routine, which pre-populated its source, once more hiding the problem by
avoiding the exception case that triggers the bug.
In other words, very special thanks to Bron Gondwana who not only
triggered this, but created a test-program to show it, and bisected the
behavior down to commit 08291429cf ("mm: fix pagecache write
deadlocks"), which changed the access pattern just enough that you can
now trigger it with 'writev()' with multiple iovecs.
That commit itself was not the cause of the bug; it just allowed all the
stars to align just right so that you could trigger the problem.
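[ For illustration only - this is the shape of a trigger, not Bron
  Gondwana's actual test program: the second iovec comes from a fresh
  mapping that has never been touched, so the in-atomic uncached copy
  inside the buffered write path can fault part-way through the second
  segment. ]

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/uio.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	static char first[65536];		/* resident source segment */
	char *second = mmap(NULL, page, PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	struct iovec iov[2] = {
		{ .iov_base = first,  .iov_len = sizeof(first) },
		{ .iov_base = second, .iov_len = page },   /* never touched */
	};
	int fd = open("testfile", O_CREAT | O_TRUNC | O_WRONLY, 0600);

	if (second == MAP_FAILED || fd < 0)
		return 1;
	memset(first, 'a', sizeof(first));

	/* On a fixed kernel the file contains exactly what was asked for;
	 * with a misreported "bytes copied" count, data near the segment
	 * boundary could silently end up wrong. */
	printf("writev returned %zd\n", writev(fd, iov, 2));
	close(fd);
	return 0;
}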
[ Side note: this is just the minimal fix to make the copy routines
(with __copy_from_user_inatomic_nocache as the particular version that
was involved in showing this) have the right return values.
We really should improve on the exceptional case further - to make the
copy do a byte-accurate copy up to the exact page limit that causes it
to fail. As it is, the callers have to do extra work to handle the
limit case gracefully. ]
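[ Schematic sketch, not the real mm/filemap.c code: the shape of that
  caller-side "extra work".  prefault_source() and copy_chunk_atomic()
  are hypothetical stand-ins for the real pre-fault and in-atomic copy
  helpers, declared only so the sketch stands alone; copy_chunk_atomic()
  follows the same "return the bytes left uncopied" convention. ]

#include <stddef.h>

#define SKETCH_PAGE_SIZE 4096UL

void prefault_source(const void *src, size_t len);		    /* hypothetical */
size_t copy_chunk_atomic(void *dst, const void *src, size_t len);  /* hypothetical */

size_t copy_one_chunk(void *dst, const void *src, size_t len)
{
	size_t copied = 0;

	while (len && copied == 0) {
		prefault_source(src, len);
		/* 'copied' is only meaningful if the copy routine reports
		 * "bytes left" accurately; over-reporting it is exactly the
		 * bug fixed above. */
		copied = len - copy_chunk_atomic(dst, src, len);
		if (copied == 0) {
			/* No progress at all: confine the next attempt to
			 * the current source page, so the retry cannot
			 * fault part-way through. */
			size_t in_page = SKETCH_PAGE_SIZE -
				((unsigned long)src & (SKETCH_PAGE_SIZE - 1));

			if (in_page < len)
				len = in_page;
		}
	}
	return copied;
}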
Reported-by: Bron Gondwana <brong@fastmail.fm>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Al Viro <viro@ZenIV.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
/* Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v2.
 *
 * Functions to copy from and to user space.
 */

#include <linux/linkage.h>
#include <asm/dwarf2.h>

#define FIX_ALIGNMENT 1

#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeature.h>

/*
 * copy_user_nocache - Uncached memory copy with exception handling
 * This will force destination/source out of cache for more performance.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 * rcx zero flag	when 1 zero on exception
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
ENTRY(__copy_user_nocache)
	CFI_STARTPROC
	pushq %rbx
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rbx, 0
	pushq %rcx			/* save zero flag */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rcx, 0

	xorl %eax,%eax			/* zero for the exception handler */

#ifdef FIX_ALIGNMENT
	/* check for bad alignment of destination */
	movl %edi,%ecx
	andl $7,%ecx
	jnz  .Lbad_alignment
.Lafter_bad_alignment:
#endif

	movq %rdx,%rcx

	movl $64,%ebx
	shrq $6,%rdx
	decq %rdx
	js   .Lhandle_tail

	/* main loop: copy 64 bytes per iteration using non-temporal stores */
	.p2align 4
.Lloop:
.Ls1:	movq (%rsi),%r11
.Ls2:	movq 1*8(%rsi),%r8
.Ls3:	movq 2*8(%rsi),%r9
.Ls4:	movq 3*8(%rsi),%r10
.Ld1:	movnti %r11,(%rdi)
.Ld2:	movnti %r8,1*8(%rdi)
.Ld3:	movnti %r9,2*8(%rdi)
.Ld4:	movnti %r10,3*8(%rdi)

.Ls5:	movq 4*8(%rsi),%r11
.Ls6:	movq 5*8(%rsi),%r8
.Ls7:	movq 6*8(%rsi),%r9
.Ls8:	movq 7*8(%rsi),%r10
.Ld5:	movnti %r11,4*8(%rdi)
.Ld6:	movnti %r8,5*8(%rdi)
.Ld7:	movnti %r9,6*8(%rdi)
.Ld8:	movnti %r10,7*8(%rdi)

	dec  %rdx

	leaq 64(%rsi),%rsi
	leaq 64(%rdi),%rdi

	jns  .Lloop

	/* tail: copy the remaining whole quadwords, then single bytes */
	.p2align 4
.Lhandle_tail:
	movl %ecx,%edx
	andl $63,%ecx
	shrl $3,%ecx
	jz   .Lhandle_7
	movl $8,%ebx
	.p2align 4
.Lloop_8:
.Ls9:	movq (%rsi),%r8
.Ld9:	movnti %r8,(%rdi)
	decl %ecx
	leaq 8(%rdi),%rdi
	leaq 8(%rsi),%rsi
	jnz  .Lloop_8

.Lhandle_7:
	movl %edx,%ecx
	andl $7,%ecx
	jz   .Lende
	.p2align 4
.Lloop_1:
.Ls10:	movb (%rsi),%bl
.Ld10:	movb %bl,(%rdi)
	incq %rdi
	incq %rsi
	decl %ecx
	jnz  .Lloop_1

	CFI_REMEMBER_STATE
.Lende:
	popq %rcx
	CFI_ADJUST_CFA_OFFSET -8
	CFI_RESTORE %rcx
	popq %rbx
	CFI_ADJUST_CFA_OFFSET -8
	CFI_RESTORE rbx
	sfence
	ret
	CFI_RESTORE_STATE

#ifdef FIX_ALIGNMENT
	/* align destination */
	.p2align 4
.Lbad_alignment:
	movl $8,%r9d
	subl %ecx,%r9d
	movl %r9d,%ecx
	cmpq %r9,%rdx
	jz   .Lhandle_7
	js   .Lhandle_7
.Lalign_1:
.Ls11:	movb (%rsi),%bl
.Ld11:	movb %bl,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz  .Lalign_1
	subq %r9,%rdx
	jmp  .Lafter_bad_alignment
#endif

	/* table sorted by exception address */
	.section __ex_table,"a"
	.align 8
	.quad .Ls1,.Ls1e	/* .Ls[1-4] - 0 bytes copied */
	.quad .Ls2,.Ls1e
	.quad .Ls3,.Ls1e
	.quad .Ls4,.Ls1e
	.quad .Ld1,.Ls1e	/* .Ld[1-4] - 0..24 bytes copied */
	.quad .Ld2,.Ls2e
	.quad .Ld3,.Ls3e
	.quad .Ld4,.Ls4e
	.quad .Ls5,.Ls5e	/* .Ls[5-8] - 32 bytes copied */
	.quad .Ls6,.Ls5e
	.quad .Ls7,.Ls5e
	.quad .Ls8,.Ls5e
	.quad .Ld5,.Ls5e	/* .Ld[5-8] - 32..56 bytes copied */
	.quad .Ld6,.Ls6e
	.quad .Ld7,.Ls7e
	.quad .Ld8,.Ls8e
	.quad .Ls9,.Le_quad
	.quad .Ld9,.Le_quad
	.quad .Ls10,.Le_byte
	.quad .Ld10,.Le_byte
#ifdef FIX_ALIGNMENT
	.quad .Ls11,.Lzero_rest
	.quad .Ld11,.Lzero_rest
#endif
	.quad .Le5,.Le_zero
	.previous

	/* eax: zero, ebx: 64 */
.Ls1e:	addl $8,%eax		/* eax: bytes left uncopied: Ls1e: 64 .. Ls8e: 8 */
.Ls2e:	addl $8,%eax
.Ls3e:	addl $8,%eax
.Ls4e:	addl $8,%eax
.Ls5e:	addl $8,%eax
.Ls6e:	addl $8,%eax
.Ls7e:	addl $8,%eax
.Ls8e:	addl $8,%eax
	addq %rbx,%rdi		/* +64 */
	subq %rax,%rdi		/* correct destination with computed offset */

	shlq $6,%rdx		/* loop counter * 64 (stride length) */
	addq %rax,%rdx		/* add offset to loopcnt */
	andl $63,%ecx		/* remaining bytes */
	addq %rcx,%rdx		/* add them */
	jmp  .Lzero_rest

	/* exception on quad word loop in tail handling */
	/* ecx: loopcnt/8, %edx: length, rdi: correct */
.Le_quad:
	shll $3,%ecx
	andl $7,%edx
	addl %ecx,%edx
	/* edx: bytes to zero, rdi: dest, eax: zero */
.Lzero_rest:
	cmpl $0,(%rsp)		/* zero flag set? */
	jz   .Le_zero
	movq %rdx,%rcx
.Le_byte:
	xorl %eax,%eax
.Le5:	rep
	stosb
	/* when there is another exception while zeroing the rest just return */
.Le_zero:
	movq %rdx,%rax
	jmp  .Lende
	CFI_ENDPROC
ENDPROC(__copy_user_nocache)