commit 5331d2c7ef
Set up the temporary text mapping for the final jump address, so that
the system can jump to the right address after all the pages have been
copied back to their original addresses; otherwise, the final mapping
for the jump address would be invalid.

Analogous changes were made for 64-bit in commit 65c0554b73
("x86/power/64: Fix kernel text mapping corruption during image
restoration").
Signed-off-by: Zhimin Gu <kookoo.gu@intel.com>
Acked-by: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Chen Yu <yu.c.chen@intel.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
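
The temporary text mapping itself is built on the C side (in
arch/x86/power/hibernate_32.c) before the assembly below switches to the
temporary page tables. A minimal sketch of the idea, not the verbatim
patch: jump_address_phys is assumed to hold the physical address backing
restore_jump_address, and resume_one_md_table_init() /
resume_one_page_table_init() are assumed helpers modeled on that file's
page-table setup code.

	static int set_up_temporary_text_mapping(pgd_t *pgd_base)
	{
		pgd_t *pgd = pgd_base + pgd_index(restore_jump_address);
		pmd_t *pmd;
		pte_t *pte;

		pmd = resume_one_md_table_init(pgd);
		if (!pmd)
			return -ENOMEM;

		if (boot_cpu_has(X86_FEATURE_PSE)) {
			/* One 4M large page covers the jump address. */
			set_pmd(pmd + pmd_index(restore_jump_address),
				__pmd((jump_address_phys & PMD_MASK) |
				      pgprot_val(PAGE_KERNEL_LARGE_EXEC)));
		} else {
			pte = resume_one_page_table_init(pmd);
			if (!pte)
				return -ENOMEM;
			set_pte(pte + pte_index(restore_jump_address),
				__pte((jump_address_phys & PAGE_MASK) |
				      pgprot_val(PAGE_KERNEL_EXEC)));
		}

		return 0;
	}

Either way, the entries for restore_jump_address end up in the temporary
page tables, so the final jump in core_restore_code below lands in
mapped, executable text.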
arch/x86/power/hibernate_asm_32.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This may not use any stack, nor any variable that is not "NoSave":
 *
 * It's rewriting one kernel image with another.  What is stack in the "old"
 * image could very well be a data page in the "new" image, and overwriting
 * your own stack out from under you is a bad idea.
 */

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/frame.h>

.text

ENTRY(swsusp_arch_suspend)
	movl	%esp, saved_context_esp
	movl	%ebx, saved_context_ebx
	movl	%ebp, saved_context_ebp
	movl	%esi, saved_context_esi
	movl	%edi, saved_context_edi
	pushfl
	popl	saved_context_eflags

	/* save cr3 */
	movl	%cr3, %eax
	movl	%eax, restore_cr3

	FRAME_BEGIN
	call	swsusp_save
	FRAME_END
	ret
ENDPROC(swsusp_arch_suspend)

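/*
 * Entered from the C side (swsusp_arch_resume()) once the temporary
 * page tables, including the text mapping for the final jump address
 * described in the changelog above, have been prepared.
 */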
ENTRY(restore_image)
	/* prepare to jump to the image kernel */
	movl	restore_jump_address, %ebx
	movl	restore_cr3, %ebp

	movl	mmu_cr4_features, %ecx

	/* jump to relocated restore code */
	movl	relocated_restore_code, %eax
	jmpl	*%eax

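/*
 * core_restore_code below runs from a copy placed in a safe page (one
 * that will not be overwritten while the image is restored);
 * relocated_restore_code holds the address of that copy.  temp_pgt is
 * the temporary page table prepared on the C side which, as of this
 * commit, also contains the text mapping for the final jump address.
 */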
/* code below has been relocated to a safe page */
ENTRY(core_restore_code)
	movl	temp_pgt, %eax
	movl	%eax, %cr3

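	/*
	 * A write to %cr3 does not flush TLB entries for global pages,
	 * so PGE is temporarily cleared (and %cr3 reloaded) to make sure
	 * those are flushed as well before the image pages are copied.
	 */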
	jecxz	1f	# cr4 Pentium and higher, skip if zero
	andl	$~(X86_CR4_PGE), %ecx
	movl	%ecx, %cr4	# turn off PGE
	movl	%cr3, %eax	# flush TLB
	movl	%eax, %cr3
1:
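	/*
	 * Walk restore_pblist, a NULL-terminated singly linked list of
	 * struct pbe entries (see include/linux/suspend.h):
	 *
	 *	struct pbe {
	 *		void *address;		(address of the copy)
	 *		void *orig_address;	(original address of a page)
	 *		struct pbe *next;
	 *	};
	 *
	 * pbe_address, pbe_orig_address and pbe_next are the asm-offsets
	 * of these fields.  Each page is copied back to its original
	 * location with rep movsl, PAGE_SIZE >> 2 longwords at a time.
	 */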
	movl	restore_pblist, %edx
	.p2align 4,,7

copy_loop:
	testl	%edx, %edx
	jz	done

	movl	pbe_address(%edx), %esi
	movl	pbe_orig_address(%edx), %edi

	movl	$(PAGE_SIZE >> 2), %ecx
	rep
	movsl

	movl	pbe_next(%edx), %edx
	jmp	copy_loop
	.p2align 4,,7

done:
	jmpl	*%ebx

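	/*
	 * The jmpl above transfers control to restore_jump_address, which
	 * is expected to point at the image kernel's copy of
	 * restore_registers below; the temporary text mapping is what
	 * makes that address valid while temp_pgt is still in %cr3.
	 */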
	/* code below belongs to the image kernel */
	.align PAGE_SIZE
ENTRY(restore_registers)
	/* go back to the original page tables */
	movl	%ebp, %cr3
	movl	mmu_cr4_features, %ecx
	jecxz	1f	# cr4 Pentium and higher, skip if zero
	movl	%ecx, %cr4	# turn PGE back on
1:

	movl	saved_context_esp, %esp
	movl	saved_context_ebp, %ebp
	movl	saved_context_ebx, %ebx
	movl	saved_context_esi, %esi
	movl	saved_context_edi, %edi

	pushl	saved_context_eflags
	popfl

	/* Saved in save_processor_state. */
	movl	$saved_context, %eax
	lgdt	saved_context_gdt_desc(%eax)

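	/* return 0 to the caller of swsusp_arch_suspend() in the image kernel */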
	xorl	%eax, %eax

	ret
ENDPROC(restore_registers)