Up until now we have always paid attention to make sure the length of the new instruction replacing the old one is less than or equal to the length of the old instruction. If the new instruction is longer, at the time it replaces the old instruction it will overwrite the beginning of the next instruction in the kernel image and cause your pants to catch fire.

So instead of having to pay attention, teach the alternatives framework to pad shorter old instructions with NOPs at buildtime - but only in the case when len(old instruction(s)) < len(new instruction(s)), and add nothing in the >= case. (In that case we do add_nops() when patching.)

This way the alternatives user shouldn't have to care about instruction sizes and can simply use the macros.

Add asm ALTERNATIVE* flavor macros too, while at it.

Also, we need to save the pad length in a separate struct alt_instr member for NOP optimization, and the way to do that reliably is to carry the pad length instead of trying to detect whether we're looking at single-byte NOPs or at pathological instruction offsets like e9 90 90 90 90, for example, which is a valid instruction.

Thanks to Michael Matz for the great help with toolchain questions.

Signed-off-by: Borislav Petkov <bp@suse.de>
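To make the build-time padding concrete, here is a minimal sketch of what the asm-side ALTERNATIVE macro and the extended struct alt_instr can look like. The label numbers and the exact .skip expression are illustrative rather than a verbatim copy of what the patch adds; the point is that the assembler emits len(new) - len(old) NOP bytes after the old instruction only when that difference is positive (gas comparisons evaluate to -1 for true, 0 for false), and that the pad length is recorded in its own field:

        /* Sketch: pad the old instruction with NOPs at build time. */
        .macro ALTERNATIVE oldinstr, newinstr, feature
        140:
                \oldinstr
        141:
                /* Emit 0x90 (NOP) bytes only if the replacement is longer. */
                .skip -(((144f-143f)-(141b-140b)) > 0) * ((144f-143f)-(141b-140b)), 0x90
        142:
                .pushsection .altinstructions, "a"
                /* orig, repl, feature, old len (incl. pad), new len, pad len */
                altinstruction_entry 140b, 143f, \feature, 142b-140b, 144f-143f, 142b-141b
                .popsection
                .pushsection .altinstr_replacement, "ax"
        143:
                \newinstr
        144:
                .popsection
        .endm

and, on the C side, the pad length carried in a separate member so the patching code never has to guess:

        struct alt_instr {
                s32 instr_offset;       /* original instruction */
                s32 repl_offset;        /* offset to replacement instruction */
                u16 cpuid;              /* cpuid bit set for replacement */
                u8  instrlen;           /* length of original instruction */
                u8  replacementlen;     /* length of new instruction */
                u8  padlen;             /* length of build-time padding */
        } __packed;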
/* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */

#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/alternative-asm.h>

        ALIGN
copy_page_rep:
        CFI_STARTPROC
        movl    $4096/8, %ecx
        rep     movsq
        ret
        CFI_ENDPROC
ENDPROC(copy_page_rep)

/*
 * Don't use streaming copy unless the CPU indicates X86_FEATURE_REP_GOOD.
 * Could vary the prefetch distance based on SMP/UP.
 */

ENTRY(copy_page)
        CFI_STARTPROC
        subq    $2*8, %rsp
        CFI_ADJUST_CFA_OFFSET 2*8
        movq    %rbx, (%rsp)
        CFI_REL_OFFSET rbx, 0
        movq    %r12, 1*8(%rsp)
        CFI_REL_OFFSET r12, 1*8

        movl    $(4096/64)-5, %ecx
        .p2align 4
.Loop64:
        dec     %rcx
        movq    0x8*0(%rsi), %rax
        movq    0x8*1(%rsi), %rbx
        movq    0x8*2(%rsi), %rdx
        movq    0x8*3(%rsi), %r8
        movq    0x8*4(%rsi), %r9
        movq    0x8*5(%rsi), %r10
        movq    0x8*6(%rsi), %r11
        movq    0x8*7(%rsi), %r12

        prefetcht0 5*64(%rsi)

        movq    %rax, 0x8*0(%rdi)
        movq    %rbx, 0x8*1(%rdi)
        movq    %rdx, 0x8*2(%rdi)
        movq    %r8,  0x8*3(%rdi)
        movq    %r9,  0x8*4(%rdi)
        movq    %r10, 0x8*5(%rdi)
        movq    %r11, 0x8*6(%rdi)
        movq    %r12, 0x8*7(%rdi)

        leaq    64(%rsi), %rsi
        leaq    64(%rdi), %rdi

        jnz     .Loop64

        movl    $5, %ecx
        .p2align 4
.Loop2:
        decl    %ecx

        movq    0x8*0(%rsi), %rax
        movq    0x8*1(%rsi), %rbx
        movq    0x8*2(%rsi), %rdx
        movq    0x8*3(%rsi), %r8
        movq    0x8*4(%rsi), %r9
        movq    0x8*5(%rsi), %r10
        movq    0x8*6(%rsi), %r11
        movq    0x8*7(%rsi), %r12

        movq    %rax, 0x8*0(%rdi)
        movq    %rbx, 0x8*1(%rdi)
        movq    %rdx, 0x8*2(%rdi)
        movq    %r8,  0x8*3(%rdi)
        movq    %r9,  0x8*4(%rdi)
        movq    %r10, 0x8*5(%rdi)
        movq    %r11, 0x8*6(%rdi)
        movq    %r12, 0x8*7(%rdi)

        leaq    64(%rdi), %rdi
        leaq    64(%rsi), %rsi
        jnz     .Loop2

        movq    (%rsp), %rbx
        CFI_RESTORE rbx
        movq    1*8(%rsp), %r12
        CFI_RESTORE r12
        addq    $2*8, %rsp
        CFI_ADJUST_CFA_OFFSET -2*8
        ret
.Lcopy_page_end:
        CFI_ENDPROC
ENDPROC(copy_page)

/* Some CPUs run faster using the string copy instructions.
   It is also a lot simpler. Use this when possible. */

#include <asm/cpufeature.h>

        .section .altinstr_replacement,"ax"
1:      .byte 0xeb                                      /* jmp <disp8> */
        .byte (copy_page_rep - copy_page) - (2f - 1b)   /* offset */
2:
        .previous
        .section .altinstructions,"a"
        /* orig insn, replacement, feature, orig len, repl len, pad len */
        altinstruction_entry copy_page, 1b, X86_FEATURE_REP_GOOD,      \
                .Lcopy_page_end-copy_page, 2b-1b, 0
        .previous
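For comparison, with the asm ALTERNATIVE macro introduced by this change, a hand-rolled .altinstr_replacement/.altinstructions block like the one above could in principle shrink to a single line at the patch site. The following is only a sketch of that shape, not the actual conversion done in this file:

ENTRY(copy_page)
        CFI_STARTPROC
        /* Patched to "jmp copy_page_rep" when X86_FEATURE_REP_GOOD is set;
           the macro NOP-pads the empty old instruction at build time. */
        ALTERNATIVE "", "jmp copy_page_rep", X86_FEATURE_REP_GOOD
        /* unrolled register copy continues here, unchanged */

Because the old instruction is empty and the replacement is a 5-byte jmp, the macro emits five NOPs at build time and records pad length 5 in the alt_instr entry, so apply_alternatives() can later patch or optimize the site without guessing at instruction sizes.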