KVM SVM changes for 6.7:
- Report KVM_EXIT_SHUTDOWN instead of EINVAL if KVM intercepts SHUTDOWN while
  running an SEV-ES guest.

- Clean up handling of "failures" when KVM detects it can't emulate the "skip"
  action for an instruction that has already been partially emulated.  Drop a
  hack in the SVM code that fudged around the emulator code not giving SVM
  enough information to do the right thing.

Merge tag 'kvm-x86-svm-6.7' of https://github.com/kvm-x86/linux into HEAD
commit be47941980
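For orientation, here is a minimal sketch (not code from the patch itself) of the contract the renamed ->check_emulate_instruction() hook follows in the hunks below: instead of a bool, the backend returns one of the X86EMUL_* codes visible in the diff (X86EMUL_CONTINUE, X86EMUL_RETRY_INSTR, X86EMUL_UNHANDLEABLE, X86EMUL_PROPAGATE_FAULT), so common code can tell "resume the guest and retry", "a fault was already injected", and "outright failure" apart. The function name is made up for illustration; the helpers it calls (sev_guest(), sev_es_guest(), kvm_queue_exception()) are the real ones used in the SVM hunks.

/*
 * Illustrative sketch only -- condensed from the SVM hunks below, not the
 * exact upstream code.
 */
static int example_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
                                             void *insn, int insn_len)
{
        /* Full guest state is available: emulation can always proceed. */
        if (!sev_guest(vcpu->kvm))
                return X86EMUL_CONTINUE;

        /* SEV-ES hides register state: re-enter the guest and let it retry. */
        if (sev_es_guest(vcpu->kvm))
                return X86EMUL_RETRY_INSTR;

        /* No instruction bytes were provided by hardware. */
        if (unlikely(!insn)) {
                /*
                 * A "skip" that can't be performed is reported as
                 * unhandleable; the caller treats it as an emulation failure.
                 */
                if (emul_type & EMULTYPE_SKIP)
                        return X86EMUL_UNHANDLEABLE;

                /* Otherwise inject #UD and tell the caller a fault is pending. */
                kvm_queue_exception(vcpu, UD_VECTOR);
                return X86EMUL_PROPAGATE_FAULT;
        }

        return X86EMUL_CONTINUE;
}

The consumer side is the x86.c hunk at the end of the diff: X86EMUL_RETRY_INSTR and X86EMUL_PROPAGATE_FAULT make x86_emulate_instruction() return 1 (re-enter the guest), while X86EMUL_UNHANDLEABLE goes through handle_emulation_failure().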
arch/x86/include/asm/kvm-x86-ops.h
@@ -127,7 +127,7 @@ KVM_X86_OP_OPTIONAL(vm_copy_enc_context_from)
 KVM_X86_OP_OPTIONAL(vm_move_enc_context_from)
 KVM_X86_OP_OPTIONAL(guest_memory_reclaimed)
 KVM_X86_OP(get_msr_feature)
-KVM_X86_OP(can_emulate_instruction)
+KVM_X86_OP(check_emulate_instruction)
 KVM_X86_OP(apic_init_signal_blocked)
 KVM_X86_OP_OPTIONAL(enable_l2_tlb_flush)
 KVM_X86_OP_OPTIONAL(migrate_timers)
arch/x86/include/asm/kvm_host.h
@@ -1745,8 +1745,8 @@ struct kvm_x86_ops {
 
        int (*get_msr_feature)(struct kvm_msr_entry *entry);
 
-       bool (*can_emulate_instruction)(struct kvm_vcpu *vcpu, int emul_type,
-                                       void *insn, int insn_len);
+       int (*check_emulate_instruction)(struct kvm_vcpu *vcpu, int emul_type,
+                                        void *insn, int insn_len);
 
        bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu);
        int (*enable_l2_tlb_flush)(struct kvm_vcpu *vcpu);
arch/x86/kvm/svm/svm.c
@@ -364,8 +364,6 @@ static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
                svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
 
 }
-static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
-                                       void *insn, int insn_len);
 
 static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu,
                                           bool commit_side_effects)
@@ -386,14 +384,6 @@ static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu,
        }
 
        if (!svm->next_rip) {
-               /*
-                * FIXME: Drop this when kvm_emulate_instruction() does the
-                * right thing and treats "can't emulate" as outright failure
-                * for EMULTYPE_SKIP.
-                */
-               if (!svm_can_emulate_instruction(vcpu, EMULTYPE_SKIP, NULL, 0))
-                       return 0;
-
                if (unlikely(!commit_side_effects))
                        old_rflags = svm->vmcb->save.rflags;
 
@@ -2202,12 +2192,6 @@ static int shutdown_interception(struct kvm_vcpu *vcpu)
        struct kvm_run *kvm_run = vcpu->run;
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       /*
-        * The VM save area has already been encrypted so it
-        * cannot be reinitialized - just terminate.
-        */
-       if (sev_es_guest(vcpu->kvm))
-               return -EINVAL;
 
        /*
         * VMCB is undefined after a SHUTDOWN intercept. INIT the vCPU to put
@@ -2216,9 +2200,14 @@ static int shutdown_interception(struct kvm_vcpu *vcpu)
         * userspace. At a platform view, INIT is acceptable behavior as
         * there exist bare metal platforms that automatically INIT the CPU
         * in response to shutdown.
+        *
+        * The VM save area for SEV-ES guests has already been encrypted so it
+        * cannot be reinitialized, i.e. synthesizing INIT is futile.
         */
-       clear_page(svm->vmcb);
-       kvm_vcpu_reset(vcpu, true);
+       if (!sev_es_guest(vcpu->kvm)) {
+               clear_page(svm->vmcb);
+               kvm_vcpu_reset(vcpu, true);
+       }
 
        kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
        return 0;
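The net effect of the shutdown_interception() change for a VMM: a SHUTDOWN intercept in an SEV-ES guest now completes KVM_RUN with exit_reason == KVM_EXIT_SHUTDOWN instead of failing the ioctl with EINVAL. A minimal userspace-side sketch (illustrative only, not part of this patch; run_vcpu_once() is a made-up helper, while KVM_RUN, struct kvm_run and KVM_EXIT_SHUTDOWN are the existing UAPI):

#include <err.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* vcpu_fd: the vCPU file descriptor; run: the mmap()'d struct kvm_run. */
static void run_vcpu_once(int vcpu_fd, struct kvm_run *run)
{
        /* Before this change, KVM_RUN itself failed with EINVAL here. */
        if (ioctl(vcpu_fd, KVM_RUN, NULL) < 0)
                err(1, "KVM_RUN");

        /* Now the SEV-ES shutdown is reported as an ordinary exit. */
        if (run->exit_reason == KVM_EXIT_SHUTDOWN) {
                /* The guest is dead (e.g. triple fault); tear it down. */
        }
}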
@@ -4727,15 +4716,15 @@ static void svm_enable_smi_window(struct kvm_vcpu *vcpu)
 }
 #endif
 
-static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
-                                       void *insn, int insn_len)
+static int svm_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
+                                        void *insn, int insn_len)
 {
        bool smep, smap, is_user;
        u64 error_code;
 
        /* Emulation is always possible when KVM has access to all guest state. */
        if (!sev_guest(vcpu->kvm))
-               return true;
+               return X86EMUL_CONTINUE;
 
        /* #UD and #GP should never be intercepted for SEV guests. */
        WARN_ON_ONCE(emul_type & (EMULTYPE_TRAP_UD |
@@ -4747,14 +4736,14 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
         * to guest register state.
         */
        if (sev_es_guest(vcpu->kvm))
-               return false;
+               return X86EMUL_RETRY_INSTR;
 
        /*
         * Emulation is possible if the instruction is already decoded, e.g.
         * when completing I/O after returning from userspace.
         */
        if (emul_type & EMULTYPE_NO_DECODE)
-               return true;
+               return X86EMUL_CONTINUE;
 
        /*
         * Emulation is possible for SEV guests if and only if a prefilled
@@ -4780,9 +4769,11 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
         * success (and in practice it will work the vast majority of the time).
         */
        if (unlikely(!insn)) {
-               if (!(emul_type & EMULTYPE_SKIP))
-                       kvm_queue_exception(vcpu, UD_VECTOR);
-               return false;
+               if (emul_type & EMULTYPE_SKIP)
+                       return X86EMUL_UNHANDLEABLE;
+
+               kvm_queue_exception(vcpu, UD_VECTOR);
+               return X86EMUL_PROPAGATE_FAULT;
        }
 
        /*
@@ -4793,7 +4784,7 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
         * table used to translate CS:RIP resides in emulated MMIO.
         */
        if (likely(insn_len))
-               return true;
+               return X86EMUL_CONTINUE;
 
        /*
         * Detect and workaround Errata 1096 Fam_17h_00_0Fh.
@@ -4851,6 +4842,7 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
                        kvm_inject_gp(vcpu, 0);
                else
                        kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+               return X86EMUL_PROPAGATE_FAULT;
        }
 
 resume_guest:
@@ -4868,7 +4860,7 @@ resume_guest:
         * doesn't explicitly define "ignored", i.e. doing nothing and letting
         * the guest spin is technically "ignoring" the access.
         */
-       return false;
+       return X86EMUL_RETRY_INSTR;
 }
 
 static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
@@ -5028,7 +5020,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
        .vm_copy_enc_context_from = sev_vm_copy_enc_context_from,
        .vm_move_enc_context_from = sev_vm_move_enc_context_from,
 
-       .can_emulate_instruction = svm_can_emulate_instruction,
+       .check_emulate_instruction = svm_check_emulate_instruction,
 
        .apic_init_signal_blocked = svm_apic_init_signal_blocked,
 
arch/x86/kvm/vmx/vmx.c
@@ -1657,8 +1657,8 @@ static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data)
        return 0;
 }
 
-static bool vmx_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
-                                       void *insn, int insn_len)
+static int vmx_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
+                                        void *insn, int insn_len)
 {
        /*
         * Emulation of instructions in SGX enclaves is impossible as RIP does
@@ -1669,9 +1669,9 @@ static bool vmx_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
         */
        if (to_vmx(vcpu)->exit_reason.enclave_mode) {
                kvm_queue_exception(vcpu, UD_VECTOR);
-               return false;
+               return X86EMUL_PROPAGATE_FAULT;
        }
-       return true;
+       return X86EMUL_CONTINUE;
 }
 
 static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
@@ -5792,7 +5792,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
 {
        gpa_t gpa;
 
-       if (!vmx_can_emulate_instruction(vcpu, EMULTYPE_PF, NULL, 0))
+       if (vmx_check_emulate_instruction(vcpu, EMULTYPE_PF, NULL, 0))
                return 1;
 
        /*
@@ -8338,7 +8338,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
        .enable_smi_window = vmx_enable_smi_window,
 #endif
 
-       .can_emulate_instruction = vmx_can_emulate_instruction,
+       .check_emulate_instruction = vmx_check_emulate_instruction,
        .apic_init_signal_blocked = vmx_apic_init_signal_blocked,
        .migrate_timers = vmx_migrate_timers,
 
arch/x86/kvm/x86.c
@@ -7596,11 +7596,11 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
 }
 EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
 
-static int kvm_can_emulate_insn(struct kvm_vcpu *vcpu, int emul_type,
-                               void *insn, int insn_len)
+static int kvm_check_emulate_insn(struct kvm_vcpu *vcpu, int emul_type,
+                                 void *insn, int insn_len)
 {
-       return static_call(kvm_x86_can_emulate_instruction)(vcpu, emul_type,
-                                                           insn, insn_len);
+       return static_call(kvm_x86_check_emulate_instruction)(vcpu, emul_type,
+                                                             insn, insn_len);
 }
 
 int handle_ud(struct kvm_vcpu *vcpu)
@@ -7610,8 +7610,10 @@ int handle_ud(struct kvm_vcpu *vcpu)
        int emul_type = EMULTYPE_TRAP_UD;
        char sig[5]; /* ud2; .ascii "kvm" */
        struct x86_exception e;
+       int r;
 
-       if (unlikely(!kvm_can_emulate_insn(vcpu, emul_type, NULL, 0)))
+       r = kvm_check_emulate_insn(vcpu, emul_type, NULL, 0);
+       if (r != X86EMUL_CONTINUE)
                return 1;
 
        if (fep_flags &&
@@ -8993,8 +8995,14 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
        struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
        bool writeback = true;
 
-       if (unlikely(!kvm_can_emulate_insn(vcpu, emulation_type, insn, insn_len)))
-               return 1;
+       r = kvm_check_emulate_insn(vcpu, emulation_type, insn, insn_len);
+       if (r != X86EMUL_CONTINUE) {
+               if (r == X86EMUL_RETRY_INSTR || r == X86EMUL_PROPAGATE_FAULT)
+                       return 1;
+
+               WARN_ON_ONCE(r != X86EMUL_UNHANDLEABLE);
+               return handle_emulation_failure(vcpu, emulation_type);
+       }
 
        vcpu->arch.l1tf_flush_l1d = true;
 