commit 1cd9c22fee
There are a bunch of functions in mem_encrypt.c that operate on the identity mapping, which means they want virtual addresses to be equal to physical ones, without the PAGE_OFFSET shift. We also need to avoid paravirtualization calls there.

Getting this done is tricky. We cannot use the usual page table helpers, which forces us to open-code a lot of things and makes the code ugly and hard to modify. We can get it to work with the page table helpers, but that requires a few preprocessor tricks, and those tricks may have side effects for the rest of the file.

Let's isolate such functions into their own translation unit.

Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20180131135404.40692-2-kirill.shutemov@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
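For context, here is a minimal sketch of the constraint being worked around. The helper below is hypothetical and not part of the patch: on the identity mapping VA == PA, so the generic page table walkers cannot be used as-is, because they return PAGE_OFFSET-based virtual addresses via __va() and may route through paravirt hooks.

/* Hypothetical illustration of identity-mapped page table walking. */
static pud_t *identity_pud_from_p4d(p4d_t *p4d)
{
	/*
	 * pud_offset() would apply __va(), i.e. add PAGE_OFFSET to the
	 * physical address held in the entry.  Identity-mapped code must
	 * dereference the physical address directly, since VA == PA, and
	 * must use the native_*() accessors to avoid paravirt calls.
	 */
	return (pud_t *)(native_p4d_val(*p4d) & ~PTE_FLAGS_MASK);
}

Isolating such code in its own translation unit keeps whatever preprocessor overrides it needs confined to that file, instead of leaking into the rest of mem_encrypt.c.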
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __X86_MEM_ENCRYPT_H__
#define __X86_MEM_ENCRYPT_H__

#ifndef __ASSEMBLY__

#include <linux/init.h>

#include <asm/bootparam.h>

#ifdef CONFIG_AMD_MEM_ENCRYPT

extern u64 sme_me_mask;
extern bool sev_enabled;

void sme_encrypt_execute(unsigned long encrypted_kernel_vaddr,
			 unsigned long decrypted_kernel_vaddr,
			 unsigned long kernel_len,
			 unsigned long encryption_wa,
			 unsigned long encryption_pgd);

void __init sme_early_encrypt(resource_size_t paddr,
			      unsigned long size);
void __init sme_early_decrypt(resource_size_t paddr,
			      unsigned long size);

void __init sme_map_bootdata(char *real_mode_data);
void __init sme_unmap_bootdata(char *real_mode_data);

void __init sme_early_init(void);

void __init sme_encrypt_kernel(struct boot_params *bp);
void __init sme_enable(struct boot_params *bp);

int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);

/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void);

void swiotlb_set_mem_attributes(void *vaddr, unsigned long size);

bool sme_active(void);
bool sev_active(void);

#else	/* !CONFIG_AMD_MEM_ENCRYPT */

#define sme_me_mask	0ULL

static inline void __init sme_early_encrypt(resource_size_t paddr,
					    unsigned long size) { }
static inline void __init sme_early_decrypt(resource_size_t paddr,
					    unsigned long size) { }

static inline void __init sme_map_bootdata(char *real_mode_data) { }
static inline void __init sme_unmap_bootdata(char *real_mode_data) { }

static inline void __init sme_early_init(void) { }

static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
static inline void __init sme_enable(struct boot_params *bp) { }

static inline bool sme_active(void) { return false; }
static inline bool sev_active(void) { return false; }

static inline int __init
early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0; }
static inline int __init
early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; }

#endif	/* CONFIG_AMD_MEM_ENCRYPT */

/*
 * The __sme_pa() and __sme_pa_nodebug() macros are meant for use when
 * writing to or comparing values from the cr3 register.  Having the
 * encryption mask set in cr3 enables the PGD entry to be encrypted and
 * avoid special case handling of PGD allocations.
 */
#define __sme_pa(x)		(__pa(x) | sme_me_mask)
#define __sme_pa_nodebug(x)	(__pa_nodebug(x) | sme_me_mask)

#endif	/* __ASSEMBLY__ */

#endif /* __X86_MEM_ENCRYPT_H__ */
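
As a usage note on the __sme_pa() helpers above, here is a hedged sketch of the kind of caller the comment in the header describes; the wrapper function is hypothetical, not something defined by this file. The PGD's physical address is OR'd with sme_me_mask before being written to cr3, so the mask needs no special-casing when cr3 values are later compared.

/* Hypothetical caller, for illustration only. */
static void example_load_pgd(pgd_t *pgd)
{
	/* Load cr3 with the PGD's physical address plus the SME mask. */
	native_write_cr3(__sme_pa(pgd));
}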