x86/sev-es: Compile early handler code into kernel image

Set up sev-es.c and include the code from the pre-decompression stage so
that it is also built into the image of the running kernel. Temporarily
add __maybe_unused annotations to avoid build warnings until the
functions get used.
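
A minimal sketch of the sharing pattern (file and function names below are
made up for illustration; the real files are sev-es-shared.c and sev-es.c
in the diff): the common helpers live in one .c file that is #include'd
into a translation unit of each environment, and __maybe_unused keeps
-Wunused-function quiet in the unit that does not call a given helper yet.

	/* shared-helpers.c - common code, not compiled on its own */
	static void __maybe_unused shared_helper(void)
	{
		/* logic usable by the decompressor and the kernel alike */
	}

	/* kernel-side.c - built into the running kernel image */
	#include "shared-helpers.c"

	/* boot-side.c - built into the pre-decompression stage */
	#include "shared-helpers.c"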

 [ bp: Use the non-tracing rd/wrmsr variants because:
   vmlinux.o: warning: objtool: __sev_es_nmi_complete()+0x11f: \
	   call to do_trace_write_msr() leaves .noinstr.text section
   as __sev_es_nmi_complete() is noinstr due to being called from the
   NMI handler exc_nmi(). ]
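
For illustration only (a sketch, not part of the diff; the function name
is made up, the real accessor added below is sev_es_rd_ghcb_msr()), this
is the shape of the constraint:

	/* Runs in .noinstr.text, so no instrumentable calls are allowed. */
	static noinstr u64 example_read_ghcb_msr(void)
	{
		/*
		 * The tracing accessors (native_read_msr()/native_write_msr())
		 * may end up calling do_trace_{read,write}_msr() when the MSR
		 * tracepoints are enabled, which objtool flags as leaving
		 * .noinstr.text.  __rdmsr()/native_wrmsr() access the MSR
		 * directly and stay free of instrumentation.
		 */
		return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
	}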

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20200907131613.12703-39-joro@8bytes.org
commit f980f9c31a (parent c685eb0c12)
Author:    Joerg Roedel <jroedel@suse.de>
Date:      2020-09-07 15:15:39 +02:00
Committer: Borislav Petkov <bp@suse.de>
3 changed files with 175 additions and 10 deletions

arch/x86/kernel/Makefile

@@ -145,6 +145,7 @@ obj-$(CONFIG_UNWINDER_ORC) += unwind_orc.o
obj-$(CONFIG_UNWINDER_FRAME_POINTER) += unwind_frame.o
obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o
+obj-$(CONFIG_AMD_MEM_ENCRYPT) += sev-es.o
###
# 64 bit specific files
ifeq ($(CONFIG_X86_64),y)

arch/x86/kernel/sev-es-shared.c

@@ -9,7 +9,7 @@
 * and is included directly into both code-bases.
 */
-static void sev_es_terminate(unsigned int reason)
+static void __maybe_unused sev_es_terminate(unsigned int reason)
{
	u64 val = GHCB_SEV_TERMINATE;
@@ -27,7 +27,7 @@ static void sev_es_terminate(unsigned int reason)
	asm volatile("hlt\n" : : : "memory");
}
-static bool sev_es_negotiate_protocol(void)
+static bool __maybe_unused sev_es_negotiate_protocol(void)
{
	u64 val;
@@ -46,7 +46,7 @@ static bool sev_es_negotiate_protocol(void)
	return true;
}
-static void vc_ghcb_invalidate(struct ghcb *ghcb)
+static void __maybe_unused vc_ghcb_invalidate(struct ghcb *ghcb)
{
	memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}
@@ -58,7 +58,7 @@ static bool vc_decoding_needed(unsigned long exit_code)
		 exit_code <= SVM_EXIT_LAST_EXCP);
}
-static enum es_result vc_init_em_ctxt(struct es_em_ctxt *ctxt,
+static enum es_result __maybe_unused vc_init_em_ctxt(struct es_em_ctxt *ctxt,
				      struct pt_regs *regs,
				      unsigned long exit_code)
{
@@ -73,7 +73,7 @@ static enum es_result vc_init_em_ctxt(struct es_em_ctxt *ctxt,
	return ret;
}
-static void vc_finish_insn(struct es_em_ctxt *ctxt)
+static void __maybe_unused vc_finish_insn(struct es_em_ctxt *ctxt)
{
	ctxt->regs->ip += ctxt->insn.length;
}
@@ -325,7 +325,8 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
	return ES_OK;
}
-static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+static enum es_result __maybe_unused
+vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	struct pt_regs *regs = ctxt->regs;
	u64 exit_info_1, exit_info_2;
@@ -433,7 +434,7 @@ static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
	return ret;
}
-static enum es_result vc_handle_cpuid(struct ghcb *ghcb,
+static enum es_result __maybe_unused vc_handle_cpuid(struct ghcb *ghcb,
				      struct es_em_ctxt *ctxt)
{
	struct pt_regs *regs = ctxt->regs;

arch/x86/kernel/sev-es.c (new file, 163 lines)

@@ -0,0 +1,163 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2019 SUSE
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#include <linux/kernel.h>
#include <linux/mm.h>

#include <asm/sev-es.h>
#include <asm/insn-eval.h>
#include <asm/fpu/internal.h>
#include <asm/processor.h>
#include <asm/trap_pf.h>
#include <asm/trapnr.h>
#include <asm/svm.h>

static inline u64 sev_es_rd_ghcb_msr(void)
{
	return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
}

static inline void sev_es_wr_ghcb_msr(u64 val)
{
	u32 low, high;

	low  = (u32)(val);
	high = (u32)(val >> 32);

	native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high);
}
static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,
				unsigned char *buffer)
{
	return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
}

static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
{
	char buffer[MAX_INSN_SIZE];
	enum es_result ret;
	int res;

	res = vc_fetch_insn_kernel(ctxt, buffer);
	if (unlikely(res == -EFAULT)) {
		ctxt->fi.vector = X86_TRAP_PF;
		ctxt->fi.error_code = 0;
		ctxt->fi.cr2 = ctxt->regs->ip;
		return ES_EXCEPTION;
	}

	insn_init(&ctxt->insn, buffer, MAX_INSN_SIZE - res, 1);
	insn_get_length(&ctxt->insn);

	ret = ctxt->insn.immediate.got ? ES_OK : ES_DECODE_FAILED;

	return ret;
}
static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
				   char *dst, char *buf, size_t size)
{
	unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;
	char __user *target = (char __user *)dst;
	u64 d8;
	u32 d4;
	u16 d2;
	u8 d1;

	switch (size) {
	case 1:
		memcpy(&d1, buf, 1);
		if (put_user(d1, target))
			goto fault;
		break;
	case 2:
		memcpy(&d2, buf, 2);
		if (put_user(d2, target))
			goto fault;
		break;
	case 4:
		memcpy(&d4, buf, 4);
		if (put_user(d4, target))
			goto fault;
		break;
	case 8:
		memcpy(&d8, buf, 8);
		if (put_user(d8, target))
			goto fault;
		break;
	default:
		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
		return ES_UNSUPPORTED;
	}

	return ES_OK;

fault:
	if (user_mode(ctxt->regs))
		error_code |= X86_PF_USER;

	ctxt->fi.vector = X86_TRAP_PF;
	ctxt->fi.error_code = error_code;
	ctxt->fi.cr2 = (unsigned long)dst;

	return ES_EXCEPTION;
}
static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
				  char *src, char *buf, size_t size)
{
	unsigned long error_code = X86_PF_PROT;
	char __user *s = (char __user *)src;
	u64 d8;
	u32 d4;
	u16 d2;
	u8 d1;

	switch (size) {
	case 1:
		if (get_user(d1, s))
			goto fault;
		memcpy(buf, &d1, 1);
		break;
	case 2:
		if (get_user(d2, s))
			goto fault;
		memcpy(buf, &d2, 2);
		break;
	case 4:
		if (get_user(d4, s))
			goto fault;
		memcpy(buf, &d4, 4);
		break;
	case 8:
		if (get_user(d8, s))
			goto fault;
		memcpy(buf, &d8, 8);
		break;
	default:
		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
		return ES_UNSUPPORTED;
	}

	return ES_OK;

fault:
	if (user_mode(ctxt->regs))
		error_code |= X86_PF_USER;

	ctxt->fi.vector = X86_TRAP_PF;
	ctxt->fi.error_code = error_code;
	ctxt->fi.cr2 = (unsigned long)src;

	return ES_EXCEPTION;
}

/* Include code shared with pre-decompression boot stage */
#include "sev-es-shared.c"