x86/mm: Make DMA memory shared for TD guest
Intel TDX doesn't allow the VMM to directly access guest private memory. Any
memory that is required for communication with the VMM must be shared
explicitly. The same rule applies for any DMA to and from the TDX guest.
All DMA pages have to be marked as shared pages. A generic way to achieve
this without any changes to device drivers is to use the SWIOTLB framework.

The previous patch ("Add support for TDX shared memory") gave TDX guests
the _ability_ to make some pages shared, but did not make any pages shared.
This patch actually marks SWIOTLB buffers *as* shared.

Start returning true for cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) in
TDX guests. This has several implications:

 - Allows the existing mem_encrypt_init() to be used for TDX, which
   sets SWIOTLB buffers shared (aka. "decrypted").
 - Ensures that all DMA is routed via the SWIOTLB mechanism (see
   pci_swiotlb_detect()).

Stop selecting DYNAMIC_PHYSICAL_MASK directly. It will get set
indirectly by selecting X86_MEM_ENCRYPT.

mem_encrypt_init() is currently under an AMD-specific #ifdef. Move it to
a generic area of the header.

Co-developed-by: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@linux.intel.com>
Signed-off-by: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@linux.intel.com>
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Reviewed-by: Tony Luck <tony.luck@intel.com>
Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://lkml.kernel.org/r/20220405232939.73860-28-kirill.shutemov@linux.intel.com
parent 7dbde76316
commit 968b493173
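For orientation before the diff: once CC_ATTR_MEM_ENCRYPT/CC_ATTR_GUEST_MEM_ENCRYPT
are reported as true, the existing late-init path converts the SWIOTLB bounce
buffer to shared pages. The sketch below is a simplified illustration of that
path, not part of this patch, and the exact code differs between kernel versions:

/*
 * Simplified sketch of the generic mem_encrypt_init() flow the commit
 * message relies on.  With cc_platform_has(CC_ATTR_MEM_ENCRYPT) now true
 * for TDX guests, this runs for them as well and marks the SWIOTLB DMA
 * bounce buffers as shared ("decrypted") with the VMM.
 */
#include <linux/cc_platform.h>
#include <linux/swiotlb.h>
#include <linux/mem_encrypt.h>

static void print_mem_encrypt_feature_info(void);

void __init mem_encrypt_init(void)
{
	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return;

	/* Convert the SWIOTLB bounce buffer pages to shared memory. */
	swiotlb_update_mem_attributes();

	/* Reworked later in this patch to report "Intel TDX" as well. */
	print_mem_encrypt_feature_info();
}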
@@ -883,7 +883,7 @@ config INTEL_TDX_GUEST
 	depends on X86_64 && CPU_SUP_INTEL
 	depends on X86_X2APIC
 	select ARCH_HAS_CC_PLATFORM
-	select DYNAMIC_PHYSICAL_MASK
+	select X86_MEM_ENCRYPT
 	select X86_MCE
 	help
 	  Support running as a guest under Intel TDX. Without this support,
@@ -22,6 +22,7 @@ static bool intel_cc_platform_has(enum cc_attr attr)
 	case CC_ATTR_GUEST_UNROLL_STRING_IO:
 	case CC_ATTR_HOTPLUG_DISABLED:
 	case CC_ATTR_GUEST_MEM_ENCRYPT:
+	case CC_ATTR_MEM_ENCRYPT:
 		return true;
 	default:
 		return false;
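The newly-true attribute is what pushes DMA through shared memory: the x86
force_dma_unencrypted() helper keys off CC_ATTR_GUEST_MEM_ENCRYPT, so the DMA
layer allocates or bounces through pages that have been converted to shared.
A trimmed sketch, with the AMD SME host-side checks of the real helper omitted:

#include <linux/cc_platform.h>
#include <linux/device.h>

/*
 * Trimmed sketch of the x86 force_dma_unencrypted() logic.  For a TDX
 * guest, CC_ATTR_GUEST_MEM_ENCRYPT is now set, so every DMA buffer must
 * be "unencrypted", i.e. converted to a shared page the VMM can access.
 * The real helper also handles the AMD SME/SEV host case.
 */
bool force_dma_unencrypted(struct device *dev)
{
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return true;

	return false;
}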
@@ -49,9 +49,6 @@ void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages,
 
 void __init mem_encrypt_free_decrypted_mem(void);
 
-/* Architecture __weak replacement functions */
-void __init mem_encrypt_init(void);
-
 void __init sev_es_init_vc_handling(void);
 
 #define __bss_decrypted __section(".bss..decrypted")
@@ -89,6 +86,9 @@ static inline void mem_encrypt_free_decrypted_mem(void) { }
 
 #endif /* CONFIG_AMD_MEM_ENCRYPT */
 
+/* Architecture __weak replacement functions */
+void __init mem_encrypt_init(void);
+
 /*
  * The __sme_pa() and __sme_pa_nodebug() macros are meant for use when
  * writing to or comparing values from the cr3 register. Having the
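Moving the mem_encrypt_init() declaration below the #ifdef matters because the
function is an architecture override of a __weak stub in generic init code,
and a TDX guest kernel may be built without CONFIG_AMD_MEM_ENCRYPT; the
prototype has to stay visible either way. A minimal sketch of the weak/strong
pattern involved (the empty weak default lives in init/main.c in kernels of
this vintage):

#include <linux/init.h>
#include <linux/mem_encrypt.h>

/*
 * "__weak replacement" pattern: generic code supplies an empty weak
 * default; an architecture that has real work to do provides a strong
 * definition, and the linker picks that one instead.
 */

/* Generic default: nothing to do on architectures without memory encryption. */
void __init __weak mem_encrypt_init(void) { }

/*
 * The x86 strong definition (see the earlier sketch) overrides this stub
 * and converts the SWIOTLB buffers to shared memory.
 */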
@@ -42,7 +42,14 @@ bool force_dma_unencrypted(struct device *dev)
 
 static void print_mem_encrypt_feature_info(void)
 {
-	pr_info("AMD Memory Encryption Features active:");
+	pr_info("Memory Encryption Features active:");
+
+	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) {
+		pr_cont(" Intel TDX\n");
+		return;
+	}
+
+	pr_cont("AMD ");
 
 	/* Secure Memory Encryption */
 	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
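With the hunk above, a TDX guest's boot log should contain a line along the
lines of the following (reconstructed from the pr_info()/pr_cont() calls shown,
not captured from a real boot):

Memory Encryption Features active: Intel TDX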