Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 asm updates from Ingo Molnar:

 - Add UMIP emulation/spoofing for 64-bit processes as well, because of
   Wine based gaming.

 - Clean up symbols/labels in low level asm code

 - Add an assembly optimized mul_u64_u32_div() implementation on x86-64.

* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/umip: Add emulation (spoofing) for UMIP covered instructions in 64-bit processes as well
  x86/asm: Make some functions local labels
  x86/asm/suspend: Get rid of bogus_64_magic
  x86/math64: Provide a sane mul_u64_u32_div() implementation for x86_64
commit df4c0b18f2
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
@@ -140,7 +140,7 @@ ENTRY(startup_32)
 /*
  * Jump to the relocated address.
  */
-	leal	relocated(%ebx), %eax
+	leal	.Lrelocated(%ebx), %eax
 	jmp	*%eax
 ENDPROC(startup_32)
 
@@ -209,7 +209,7 @@ ENDPROC(efi32_stub_entry)
 #endif
 
 	.text
-relocated:
+.Lrelocated:
 
 /*
  * Clear BSS (stack is currently empty)
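The motivation for this rename, here and in several hunks below: GNU as treats any label that begins with ".L" as assembler-local, so it is resolved at assembly time and never emitted into the object's symbol table. Jump targets such as "relocated" therefore stop showing up as if they were functions in stack traces and symbol-based tooling. A minimal sketch of the visibility difference (hypothetical label names, toplevel GCC asm, not kernel code):

    /* Build with: gcc -o labels labels.c ; then inspect with: nm labels
     * "visible_label" appears in the symbol table; ".Llocal_label" does not. */
    asm(".text\n"
        "visible_label:\n"
        "\tnop\n"
        ".Llocal_label:\n"
        "\tnop\n");

    int main(void)
    {
        return 0;
    }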
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
@@ -87,7 +87,7 @@ ENTRY(startup_32)
 
 	call	verify_cpu
 	testl	%eax, %eax
-	jnz	no_longmode
+	jnz	.Lno_longmode
 
 /*
  * Compute the delta between where we were compiled to run at
@@ -322,7 +322,7 @@ ENTRY(startup_64)
 1:	popq	%rdi
 	subq	$1b, %rdi
 
-	call	adjust_got
+	call	.Ladjust_got
 
 	/*
 	 * At this point we are in long mode with 4-level paging enabled,
@@ -421,7 +421,7 @@ trampoline_return:
 
 	/* The new adjustment is the relocation address */
 	movq	%rbx, %rdi
-	call	adjust_got
+	call	.Ladjust_got
 
 /*
  * Copy the compressed kernel to the end of our buffer
@@ -440,7 +440,7 @@ trampoline_return:
 /*
  * Jump to the relocated address.
  */
-	leaq	relocated(%rbx), %rax
+	leaq	.Lrelocated(%rbx), %rax
 	jmp	*%rax
 
 #ifdef CONFIG_EFI_STUB
@@ -511,7 +511,7 @@ ENDPROC(efi64_stub_entry)
 #endif
 
 	.text
-relocated:
+.Lrelocated:
 
 /*
  * Clear BSS (stack is currently empty)
@@ -548,7 +548,7 @@ relocated:
  * first time we touch GOT).
  * RDI is the new adjustment to apply.
  */
-adjust_got:
+.Ladjust_got:
 	/* Walk through the GOT adding the address to the entries */
 	leaq	_got(%rip), %rdx
 	leaq	_egot(%rip), %rcx
@@ -622,7 +622,7 @@ ENTRY(trampoline_32bit_src)
 	movl	%eax, %cr4
 
 	/* Calculate address of paging_enabled() once we are executing in the trampoline */
-	leal	paging_enabled - trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_OFFSET(%ecx), %eax
+	leal	.Lpaging_enabled - trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_OFFSET(%ecx), %eax
 
 	/* Prepare the stack for far return to Long Mode */
 	pushl	$__KERNEL_CS
@@ -635,7 +635,7 @@ ENTRY(trampoline_32bit_src)
 	lret
 
 	.code64
-paging_enabled:
+.Lpaging_enabled:
 	/* Return from the trampoline */
 	jmp	*%rdi
 
@@ -647,7 +647,7 @@ paging_enabled:
 	.org	trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE
 
 	.code32
-no_longmode:
+.Lno_longmode:
 	/* This isn't an x86-64 CPU, so hang intentionally, we cannot continue */
 1:
 	hlt
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
@@ -1058,10 +1058,10 @@ ENTRY(native_load_gs_index)
 ENDPROC(native_load_gs_index)
 EXPORT_SYMBOL(native_load_gs_index)
 
-	_ASM_EXTABLE(.Lgs_change, bad_gs)
+	_ASM_EXTABLE(.Lgs_change, .Lbad_gs)
 	.section .fixup, "ax"
 	/* running with kernelgs */
-bad_gs:
+.Lbad_gs:
 	SWAPGS					/* switch back to user gs */
 .macro ZAP_GS
 	/* This can't be a string because the preprocessor needs to see it. */
diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
@@ -73,6 +73,19 @@ static inline u64 mul_u32_u32(u32 a, u32 b)
 
 #else
 # include <asm-generic/div64.h>
+
+static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 div)
+{
+	u64 q;
+
+	asm ("mulq %2; divq %3" : "=a" (q)
+				: "a" (a), "rm" ((u64)mul), "rm" ((u64)div)
+				: "rdx");
+
+	return q;
+}
+#define mul_u64_u32_div	mul_u64_u32_div
+
 #endif /* CONFIG_X86_32 */
 
 #endif /* _ASM_X86_DIV64_H */
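For reference, mul_u64_u32_div(a, mul, div) computes (a * mul) / div: the mulq leaves the full 128-bit product in RDX:RAX, and divq divides that 128-bit value, so no high bits are lost before the division (the quotient is assumed to fit in 64 bits, as in the generic version). A user-space sketch of the same semantics using a compiler-provided 128-bit type (illustrative only; the function name is made up):

    #include <assert.h>
    #include <stdint.h>

    /* Reference semantics of the asm above: widen to 128 bits, then divide. */
    static uint64_t mul_u64_u32_div_ref(uint64_t a, uint32_t mul, uint32_t div)
    {
        return (uint64_t)(((unsigned __int128)a * mul) / div);
    }

    int main(void)
    {
        /* A 64-bit intermediate product would overflow here; 128 bits do not. */
        assert(mul_u64_u32_div_ref(UINT64_MAX, 1000, 1000) == UINT64_MAX);
        return 0;
    }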
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
@@ -18,8 +18,13 @@ ENTRY(wakeup_long64)
 	movq	saved_magic, %rax
 	movq	$0x123456789abcdef0, %rdx
 	cmpq	%rdx, %rax
-	jne	bogus_64_magic
+	je	2f
 
+	/* stop here on a saved_magic mismatch */
+	movq	$0xbad6d61676963, %rcx
+1:
+	jmp	1b
+2:
 	movw	$__KERNEL_DS, %ax
 	movw	%ax, %ss
 	movw	%ax, %ds
@@ -37,9 +42,6 @@ ENTRY(wakeup_long64)
 	jmp	*%rax
 ENDPROC(wakeup_long64)
 
-bogus_64_magic:
-	jmp	bogus_64_magic
-
 ENTRY(do_suspend_lowlevel)
 	FRAME_BEGIN
 	subq	$8, %rsp
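The constant parked in %rcx on the new failure path is meant to be legible in a register dump: the low five bytes are ASCII for "magic" and the remaining high nibbles read 0xbad. A quick user-space decode (plain C, illustrative):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t rcx = 0xbad6d61676963ULL;

        /* Print the low five bytes, most significant first: "magic". */
        for (int shift = 32; shift >= 0; shift -= 8)
            putchar((int)((rcx >> shift) & 0xff));
        putchar('\n');

        /* The bits above those five bytes read as hex 0xbad. */
        printf("high bits: 0x%llx\n", (unsigned long long)(rcx >> 40));
        return 0;
    }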
diff --git a/arch/x86/kernel/umip.c b/arch/x86/kernel/umip.c
@@ -19,7 +19,7 @@
 /** DOC: Emulation for User-Mode Instruction Prevention (UMIP)
  *
  * The feature User-Mode Instruction Prevention present in recent Intel
- * processor prevents a group of instructions (sgdt, sidt, sldt, smsw, and str)
+ * processor prevents a group of instructions (SGDT, SIDT, SLDT, SMSW and STR)
  * from being executed with CPL > 0. Otherwise, a general protection fault is
  * issued.
  *
@@ -36,8 +36,8 @@
  * DOSEMU2) rely on this subset of instructions to function.
  *
  * The instructions protected by UMIP can be split in two groups. Those which
- * return a kernel memory address (sgdt and sidt) and those which return a
- * value (sldt, str and smsw).
+ * return a kernel memory address (SGDT and SIDT) and those which return a
+ * value (SLDT, STR and SMSW).
 *
 * For the instructions that return a kernel memory address, applications
 * such as WineHQ rely on the result being located in the kernel memory space,
@@ -45,15 +45,13 @@
 * value that, lies close to the top of the kernel memory. The limit for the GDT
 * and the IDT are set to zero.
 *
- * Given that sldt and str are not commonly used in programs that run on WineHQ
+ * Given that SLDT and STR are not commonly used in programs that run on WineHQ
 * or DOSEMU2, they are not emulated.
 *
 * The instruction smsw is emulated to return the value that the register CR0
 * has at boot time as set in the head_32.
 *
- * Also, emulation is provided only for 32-bit processes; 64-bit processes
- * that attempt to use the instructions that UMIP protects will receive the
- * SIGSEGV signal issued as a consequence of the general protection fault.
+ * Emulation is provided for both 32-bit and 64-bit processes.
 *
 * Care is taken to appropriately emulate the results when segmentation is
 * used. That is, rather than relying on USER_DS and USER_CS, the function
@@ -63,17 +61,18 @@
 * application uses a local descriptor table.
 */
 
-#define UMIP_DUMMY_GDT_BASE 0xfffe0000
-#define UMIP_DUMMY_IDT_BASE 0xffff0000
+#define UMIP_DUMMY_GDT_BASE 0xfffffffffffe0000ULL
+#define UMIP_DUMMY_IDT_BASE 0xffffffffffff0000ULL
 
 /*
  * The SGDT and SIDT instructions store the contents of the global descriptor
  * table and interrupt table registers, respectively. The destination is a
  * memory operand of X+2 bytes. X bytes are used to store the base address of
- * the table and 2 bytes are used to store the limit. In 32-bit processes, the
- * only processes for which emulation is provided, X has a value of 4.
+ * the table and 2 bytes are used to store the limit. In 32-bit processes X
+ * has a value of 4, in 64-bit processes X has a value of 8.
  */
-#define UMIP_GDT_IDT_BASE_SIZE 4
+#define UMIP_GDT_IDT_BASE_SIZE_64BIT 8
+#define UMIP_GDT_IDT_BASE_SIZE_32BIT 4
 #define UMIP_GDT_IDT_LIMIT_SIZE 2
 
 #define UMIP_INST_SGDT	0	/* 0F 01 /0 */
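The widened dummy bases are the old 32-bit constants sign-extended into canonical 64-bit kernel addresses, so a 32-bit process that only stores the low 4 bytes still sees exactly the values it saw before. A quick check (plain C, illustrative):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Sign-extend the old 32-bit dummy GDT base to 64 bits. */
        uint64_t base64 = (uint64_t)(int64_t)(int32_t)0xfffe0000u;

        assert(base64 == 0xfffffffffffe0000ULL);  /* new 64-bit define */
        assert((uint32_t)base64 == 0xfffe0000u);  /* old 32-bit define */
        return 0;
    }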
@@ -189,6 +188,7 @@ static int identify_insn(struct insn *insn)
 * @umip_inst:	A constant indicating the instruction to emulate
 * @data:	Buffer into which the dummy result is stored
 * @data_size:	Size of the emulated result
+ * @x86_64:	true if process is 64-bit, false otherwise
 *
 * Emulate an instruction protected by UMIP and provide a dummy result. The
 * result of the emulation is saved in @data. The size of the results depends
@@ -202,11 +202,8 @@ static int identify_insn(struct insn *insn)
 * 0 on success, -EINVAL on error while emulating.
 */
 static int emulate_umip_insn(struct insn *insn, int umip_inst,
-			     unsigned char *data, int *data_size)
+			     unsigned char *data, int *data_size, bool x86_64)
 {
-	unsigned long dummy_base_addr, dummy_value;
-	unsigned short dummy_limit = 0;
-
 	if (!data || !data_size || !insn)
 		return -EINVAL;
 	/*
@@ -219,6 +216,9 @@ static int emulate_umip_insn(struct insn *insn, int umip_inst,
 	 * is always returned irrespective of the operand size.
 	 */
 	if (umip_inst == UMIP_INST_SGDT || umip_inst == UMIP_INST_SIDT) {
+		u64 dummy_base_addr;
+		u16 dummy_limit = 0;
+
 		/* SGDT and SIDT do not use registers operands. */
 		if (X86_MODRM_MOD(insn->modrm.value) == 3)
 			return -EINVAL;
@@ -228,13 +228,24 @@ static int emulate_umip_insn(struct insn *insn, int umip_inst,
 		else
 			dummy_base_addr = UMIP_DUMMY_IDT_BASE;
 
-		*data_size = UMIP_GDT_IDT_LIMIT_SIZE + UMIP_GDT_IDT_BASE_SIZE;
-
-		memcpy(data + 2, &dummy_base_addr, UMIP_GDT_IDT_BASE_SIZE);
+		/*
+		 * 64-bit processes use the entire dummy base address.
+		 * 32-bit processes use the lower 32 bits of the base address.
+		 * dummy_base_addr is always 64 bits, but we memcpy the correct
+		 * number of bytes from it to the destination.
+		 */
+		if (x86_64)
+			*data_size = UMIP_GDT_IDT_BASE_SIZE_64BIT;
+		else
+			*data_size = UMIP_GDT_IDT_BASE_SIZE_32BIT;
+
+		memcpy(data + 2, &dummy_base_addr, *data_size);
+
+		*data_size += UMIP_GDT_IDT_LIMIT_SIZE;
 		memcpy(data, &dummy_limit, UMIP_GDT_IDT_LIMIT_SIZE);
 
 	} else if (umip_inst == UMIP_INST_SMSW) {
-		dummy_value = CR0_STATE;
+		unsigned long dummy_value = CR0_STATE;
 
 		/*
 		 * Even though the CR0 register has 4 bytes, the number
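The emulated SGDT/SIDT result is a packed little-endian buffer: a 2-byte limit followed by the 4- or 8-byte base, which is why the base is copied to data + 2 and the limit size is added to *data_size afterwards. A user-space sketch of the same layout logic (illustrative; assumes a little-endian host, as the kernel code may on x86):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    /* Fill a dummy descriptor-table image: 2-byte limit, then the low 4
     * (32-bit process) or all 8 (64-bit process) bytes of the base. */
    static int fill_dummy_sgdt(unsigned char *data, bool x86_64)
    {
        uint64_t dummy_base_addr = 0xfffffffffffe0000ULL;
        uint16_t dummy_limit = 0;
        int data_size = x86_64 ? 8 : 4;

        memcpy(data + 2, &dummy_base_addr, data_size);  /* base  */
        data_size += 2;
        memcpy(data, &dummy_limit, 2);                  /* limit */
        return data_size;
    }

    int main(void)
    {
        unsigned char buf[10];

        assert(fill_dummy_sgdt(buf, true) == 10);   /* 8-byte base + 2-byte limit */
        assert(fill_dummy_sgdt(buf, false) == 6);   /* 4-byte base + 2-byte limit */
        return 0;
    }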
@@ -290,11 +301,10 @@ static void force_sig_info_umip_fault(void __user *addr, struct pt_regs *regs)
 * fixup_umip_exception() - Fixup a general protection fault caused by UMIP
 * @regs:	Registers as saved when entering the #GP handler
 *
- * The instructions sgdt, sidt, str, smsw, sldt cause a general protection
- * fault if executed with CPL > 0 (i.e., from user space). If the offending
- * user-space process is not in long mode, this function fixes the exception
- * up and provides dummy results for sgdt, sidt and smsw; str and sldt are not
- * fixed up. Also long mode user-space processes are not fixed up.
+ * The instructions SGDT, SIDT, STR, SMSW and SLDT cause a general protection
+ * fault if executed with CPL > 0 (i.e., from user space). This function fixes
+ * the exception up and provides dummy results for SGDT, SIDT and SMSW; STR
+ * and SLDT are not fixed up.
 *
 * If operands are memory addresses, results are copied to user-space memory as
 * indicated by the instruction pointed by eIP using the registers indicated in
@@ -373,13 +383,14 @@ bool fixup_umip_exception(struct pt_regs *regs)
 	umip_pr_warning(regs, "%s instruction cannot be used by applications.\n",
 			umip_insns[umip_inst]);
 
-	/* Do not emulate SLDT, STR or user long mode processes. */
-	if (umip_inst == UMIP_INST_STR || umip_inst == UMIP_INST_SLDT || user_64bit_mode(regs))
+	/* Do not emulate (spoof) SLDT or STR. */
+	if (umip_inst == UMIP_INST_STR || umip_inst == UMIP_INST_SLDT)
 		return false;
 
 	umip_pr_warning(regs, "For now, expensive software emulation returns the result.\n");
 
-	if (emulate_umip_insn(&insn, umip_inst, dummy_data, &dummy_data_size))
+	if (emulate_umip_insn(&insn, umip_inst, dummy_data, &dummy_data_size,
+			      user_64bit_mode(regs)))
 		return false;
 
 	/*
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
@@ -33,7 +33,7 @@
 102:
 	.section .fixup,"ax"
 103:	addl %ecx,%edx			/* ecx is zerorest also */
-	jmp copy_user_handle_tail
+	jmp .Lcopy_user_handle_tail
 	.previous
 
 	_ASM_EXTABLE_UA(100b, 103b)
@@ -113,7 +113,7 @@ ENTRY(copy_user_generic_unrolled)
 40:	leal (%rdx,%rcx,8),%edx
 	jmp 60f
 50:	movl %ecx,%edx
-60:	jmp copy_user_handle_tail /* ecx is zerorest also */
+60:	jmp .Lcopy_user_handle_tail /* ecx is zerorest also */
 	.previous
 
 	_ASM_EXTABLE_UA(1b, 30b)
@@ -177,7 +177,7 @@ ENTRY(copy_user_generic_string)
 	.section .fixup,"ax"
 11:	leal (%rdx,%rcx,8),%ecx
 12:	movl %ecx,%edx		/* ecx is zerorest also */
-	jmp copy_user_handle_tail
+	jmp .Lcopy_user_handle_tail
 	.previous
 
 	_ASM_EXTABLE_UA(1b, 11b)
@@ -210,7 +210,7 @@ ENTRY(copy_user_enhanced_fast_string)
 
 	.section .fixup,"ax"
 12:	movl %ecx,%edx		/* ecx is zerorest also */
-	jmp copy_user_handle_tail
+	jmp .Lcopy_user_handle_tail
 	.previous
 
 	_ASM_EXTABLE_UA(1b, 12b)
@@ -231,7 +231,7 @@ EXPORT_SYMBOL(copy_user_enhanced_fast_string)
 * eax uncopied bytes or 0 if successful.
 */
 ALIGN;
-copy_user_handle_tail:
+.Lcopy_user_handle_tail:
 	movl %edx,%ecx
 1:	rep movsb
 2:	mov %ecx,%eax
@@ -239,7 +239,7 @@ copy_user_handle_tail:
 	ret
 
 	_ASM_EXTABLE_UA(1b, 2b)
-END(copy_user_handle_tail)
+END(.Lcopy_user_handle_tail)
 
 /*
 * copy_user_nocache - Uncached memory copy with exception handling
@@ -364,7 +364,7 @@ ENTRY(__copy_user_nocache)
 	movl %ecx,%edx
 .L_fixup_handle_tail:
 	sfence
-	jmp copy_user_handle_tail
+	jmp .Lcopy_user_handle_tail
 	.previous
 
 	_ASM_EXTABLE_UA(1b, .L_fixup_4x8b_copy)
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
@@ -115,7 +115,7 @@ ENDPROC(__get_user_8)
 EXPORT_SYMBOL(__get_user_8)
 
 
-bad_get_user_clac:
+.Lbad_get_user_clac:
 	ASM_CLAC
 bad_get_user:
 	xor %edx,%edx
@@ -123,7 +123,7 @@ bad_get_user:
 	ret
 
 #ifdef CONFIG_X86_32
-bad_get_user_8_clac:
+.Lbad_get_user_8_clac:
 	ASM_CLAC
 bad_get_user_8:
 	xor %edx,%edx
@@ -132,12 +132,12 @@ bad_get_user_8:
 	ret
 #endif
 
-	_ASM_EXTABLE_UA(1b, bad_get_user_clac)
-	_ASM_EXTABLE_UA(2b, bad_get_user_clac)
-	_ASM_EXTABLE_UA(3b, bad_get_user_clac)
+	_ASM_EXTABLE_UA(1b, .Lbad_get_user_clac)
+	_ASM_EXTABLE_UA(2b, .Lbad_get_user_clac)
+	_ASM_EXTABLE_UA(3b, .Lbad_get_user_clac)
 #ifdef CONFIG_X86_64
-	_ASM_EXTABLE_UA(4b, bad_get_user_clac)
+	_ASM_EXTABLE_UA(4b, .Lbad_get_user_clac)
 #else
-	_ASM_EXTABLE_UA(4b, bad_get_user_8_clac)
-	_ASM_EXTABLE_UA(5b, bad_get_user_8_clac)
+	_ASM_EXTABLE_UA(4b, .Lbad_get_user_8_clac)
+	_ASM_EXTABLE_UA(5b, .Lbad_get_user_8_clac)
 #endif
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
@@ -37,7 +37,7 @@
 ENTRY(__put_user_1)
 	ENTER
 	cmp TASK_addr_limit(%_ASM_BX),%_ASM_CX
-	jae bad_put_user
+	jae .Lbad_put_user
 	ASM_STAC
 1:	movb %al,(%_ASM_CX)
 	xor %eax,%eax
@@ -51,7 +51,7 @@ ENTRY(__put_user_2)
 	mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
 	sub $1,%_ASM_BX
 	cmp %_ASM_BX,%_ASM_CX
-	jae bad_put_user
+	jae .Lbad_put_user
 	ASM_STAC
 2:	movw %ax,(%_ASM_CX)
 	xor %eax,%eax
@@ -65,7 +65,7 @@ ENTRY(__put_user_4)
 	mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
 	sub $3,%_ASM_BX
 	cmp %_ASM_BX,%_ASM_CX
-	jae bad_put_user
+	jae .Lbad_put_user
 	ASM_STAC
 3:	movl %eax,(%_ASM_CX)
 	xor %eax,%eax
@@ -79,7 +79,7 @@ ENTRY(__put_user_8)
 	mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
 	sub $7,%_ASM_BX
 	cmp %_ASM_BX,%_ASM_CX
-	jae bad_put_user
+	jae .Lbad_put_user
 	ASM_STAC
 4:	mov %_ASM_AX,(%_ASM_CX)
 #ifdef CONFIG_X86_32
@@ -91,16 +91,16 @@ ENTRY(__put_user_8)
 ENDPROC(__put_user_8)
 EXPORT_SYMBOL(__put_user_8)
 
-bad_put_user_clac:
+.Lbad_put_user_clac:
 	ASM_CLAC
-bad_put_user:
+.Lbad_put_user:
 	movl $-EFAULT,%eax
 	RET
 
-	_ASM_EXTABLE_UA(1b, bad_put_user_clac)
-	_ASM_EXTABLE_UA(2b, bad_put_user_clac)
-	_ASM_EXTABLE_UA(3b, bad_put_user_clac)
-	_ASM_EXTABLE_UA(4b, bad_put_user_clac)
+	_ASM_EXTABLE_UA(1b, .Lbad_put_user_clac)
+	_ASM_EXTABLE_UA(2b, .Lbad_put_user_clac)
+	_ASM_EXTABLE_UA(3b, .Lbad_put_user_clac)
+	_ASM_EXTABLE_UA(4b, .Lbad_put_user_clac)
 #ifdef CONFIG_X86_32
-	_ASM_EXTABLE_UA(5b, bad_put_user_clac)
+	_ASM_EXTABLE_UA(5b, .Lbad_put_user_clac)
 #endif