x86/power/64: Always create temporary identity mapping correctly
The low-level resume-from-hibernation code on x86-64 uses
kernel_ident_mapping_init() to create the temporary identity mapping,
but that function assumes that the offset between kernel virtual
addresses and physical addresses is aligned on the PGD level.

However, with a randomized identity mapping base, it may be aligned on
the PUD level instead, and if that happens, the temporary identity
mapping created by set_up_temporary_mappings() will not reflect the
actual kernel identity mapping and the image restoration will fail as
a result (leading to a kernel panic most of the time).

To fix this problem, rework kernel_ident_mapping_init() to support
unaligned offsets between KVA and PA up to the PMD level and make
set_up_temporary_mappings() use it as appropriate.

Reported-and-tested-by: Thomas Garnier <thgarnie@google.com>
Reported-by: Borislav Petkov <bp@suse.de>
Suggested-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Yinghai Lu <yinghai@kernel.org>
commit e4630fdd47
parent c226fab474
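The heart of the patch below is the address arithmetic:
kernel_ident_mapping_init() now loops over virtual addresses
VA = PA + info->offset and points each PMD entry at VA - info->offset
(the physical address), so the VA/PA offset only has to be PMD-aligned
(2 MiB), not PGD-aligned. A minimal userspace sketch of that
arithmetic, assuming x86-64's 2 MiB PMD granularity (the walk() helper
and the sample addresses are illustrative, not part of the patch):

    #include <stdio.h>

    #define PMD_SIZE (2UL << 20)          /* 2 MiB, the x86-64 large-page size */
    #define PMD_MASK (~(PMD_SIZE - 1))

    /*
     * Walk [pstart, pend) the way the reworked function does: iterate
     * over VA = PA + offset and point each 2 MiB entry at VA - offset.
     */
    static void walk(unsigned long pstart, unsigned long pend,
                     unsigned long offset)
    {
        unsigned long addr = (pstart + offset) & PMD_MASK;
        unsigned long end = pend + offset;

        for (; addr < end; addr += PMD_SIZE)
            printf("VA 0x%016lx -> PA 0x%016lx\n", addr, addr - offset);
    }

    int main(void)
    {
        /* An offset that is PMD-aligned but deliberately not PGD-aligned. */
        walk(0x1000000UL, 0x1400000UL, 0xffff880000200000UL);
        return 0;
    }

With a PGD-aligned offset, the old code's single pgd_index(__PAGE_OFFSET)
shift was enough; once the base is randomized at finer granularity, the
offset has to be carried all the way down to the PMD entries as above.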
diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
--- a/arch/x86/include/asm/init.h
+++ b/arch/x86/include/asm/init.h
@@ -5,10 +5,10 @@ struct x86_mapping_info {
 	void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
 	void *context;			 /* context for alloc_pgt_page */
 	unsigned long pmd_flag;		 /* page flag for PMD entry */
-	bool kernel_mapping;		 /* kernel mapping or ident mapping */
+	unsigned long offset;		 /* ident mapping offset */
 };
 
 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
-				unsigned long addr, unsigned long end);
+				unsigned long pstart, unsigned long pend);
 
 #endif /* _ASM_X86_INIT_H */
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
--- a/arch/x86/mm/ident_map.c
+++ b/arch/x86/mm/ident_map.c
@@ -3,15 +3,17 @@
  * included by both the compressed kernel and the regular kernel.
  */
 
-static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
+static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
 			   unsigned long addr, unsigned long end)
 {
 	addr &= PMD_MASK;
 	for (; addr < end; addr += PMD_SIZE) {
 		pmd_t *pmd = pmd_page + pmd_index(addr);
 
-		if (!pmd_present(*pmd))
-			set_pmd(pmd, __pmd(addr | pmd_flag));
+		if (pmd_present(*pmd))
+			continue;
+
+		set_pmd(pmd, __pmd((addr - info->offset) | info->pmd_flag));
 	}
 }
 
@@ -30,13 +32,13 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
 
 		if (pud_present(*pud)) {
 			pmd = pmd_offset(pud, 0);
-			ident_pmd_init(info->pmd_flag, pmd, addr, next);
+			ident_pmd_init(info, pmd, addr, next);
 			continue;
 		}
 		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
 		if (!pmd)
 			return -ENOMEM;
-		ident_pmd_init(info->pmd_flag, pmd, addr, next);
+		ident_pmd_init(info, pmd, addr, next);
 		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
 	}
 
@@ -44,14 +46,15 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
 }
 
 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
-			      unsigned long addr, unsigned long end)
+			      unsigned long pstart, unsigned long pend)
 {
+	unsigned long addr = pstart + info->offset;
+	unsigned long end = pend + info->offset;
 	unsigned long next;
 	int result;
-	int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;
 
 	for (; addr < end; addr = next) {
-		pgd_t *pgd = pgd_page + pgd_index(addr) + off;
+		pgd_t *pgd = pgd_page + pgd_index(addr);
 		pud_t *pud;
 
 		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -87,7 +87,7 @@ static int set_up_temporary_mappings(void)
 	struct x86_mapping_info info = {
 		.alloc_pgt_page	= alloc_pgt_page,
 		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
-		.kernel_mapping = true,
+		.offset		= __PAGE_OFFSET,
 	};
 	unsigned long mstart, mend;
 	pgd_t *pgd;
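For context, here is how callers are expected to fill in struct
x86_mapping_info after this change. The second initializer is the
hibernation hunk above; the zero-offset one is a sketch of a plain
identity-mapping user (illustrative only, not an additional hunk from
the patch):

    /* Plain identity mapping: VA == PA, so .offset is simply left at 0. */
    struct x86_mapping_info ident_info = {
        .alloc_pgt_page = alloc_pgt_page,
        .pmd_flag       = __PAGE_KERNEL_LARGE_EXEC,
    };

    /* Direct-mapping style: VA == PA + __PAGE_OFFSET (hibernation resume). */
    struct x86_mapping_info kernel_info = {
        .alloc_pgt_page = alloc_pgt_page,
        .pmd_flag       = __PAGE_KERNEL_LARGE_EXEC,
        .offset         = __PAGE_OFFSET,
    };

This is also why the kernel_mapping flag can go away: a nonzero .offset
now encodes the same information without hard-coding
pgd_index(__PAGE_OFFSET).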