KVM: x86: Funnel all fancy MSR return value handling into a common helper
Add a common helper, kvm_do_msr_access(), to invoke the "leaf" APIs that
are type and access specific, and more importantly to handle errors that
are returned from the leaf APIs.  I.e. turn kvm_msr_ignored_check() from
a helper that is called on an error, into a trampoline that detects
errors *and* applies relevant side effects, e.g. logging unimplemented
accesses.

Because the leaf APIs are used for guest accesses, userspace accesses,
and KVM accesses, and because KVM supports restricting access to MSRs
from userspace via filters, the error handling is subtly non-trivial.
E.g. KVM has had at least one bug escape due to making each "outer"
function handle errors.  See commit 3376ca3f1a ("KVM: x86: Fix
KVM_GET_MSRS stack info leak").
Link: https://lore.kernel.org/r/20240802181935.292540-8-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent 7075f16361
commit 1cec203498
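The funneling pattern is easier to see outside of KVM. Below is a minimal, self-contained userspace sketch of the same idea; all names here (do_access(), accessor_t, ERR_UNSUPPORTED, ignore_unsupported) are hypothetical stand-ins, not KVM symbols — the real implementation is in the diff that follows. The point is that leaf accessors report errors without policy, while a single dispatcher owns the side effects: zeroing the output on failed reads, and deciding whether an unsupported access is logged or silently masked.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum access_type { ACC_READ, ACC_WRITE };

#define ERR_UNSUPPORTED 2	/* stand-in for KVM_MSR_RET_UNSUPPORTED */

/* Leaf accessor signature: returns 0 on success, nonzero on error. */
typedef int (*accessor_t)(uint32_t index, uint64_t *data);

static bool ignore_unsupported = true;	/* stand-in for ignore_msrs */

/* Common funnel: invokes the leaf API and owns ALL error handling. */
static int do_access(uint32_t index, uint64_t *data, enum access_type rw,
		     accessor_t fn)
{
	int ret = fn(index, data);

	/* Zero the output on failed reads so stale data can't leak out. */
	if (ret && rw == ACC_READ)
		*data = 0;

	/* Only "unsupported" errors are candidates for being ignored. */
	if (ret != ERR_UNSUPPORTED)
		return ret;

	if (!ignore_unsupported) {
		fprintf(stderr, "unhandled %s: 0x%x\n",
			rw == ACC_WRITE ? "write" : "read", index);
		return ret;
	}

	/* Policy says mask the error: the access is silently ignored. */
	return 0;
}

/* Example leaf API that only implements a single register. */
static int get_reg(uint32_t index, uint64_t *data)
{
	if (index != 0x10)
		return ERR_UNSUPPORTED;
	*data = 42;
	return 0;
}

int main(void)
{
	uint64_t val = 0xdeadbeef;	/* deliberately stale */
	int ret = do_access(0x20, &val, ACC_READ, get_reg);

	/* Prints "ret=0 val=0": the error was masked and val was zeroed. */
	printf("ret=%d val=%llu\n", ret, (unsigned long long)val);
	return 0;
}

With the dispatcher owning policy, wrappers like kvm_get_msr_ignored_check() collapse into one-line trampolines, as the hunks below show.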
@@ -304,25 +304,40 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
 
 static struct kmem_cache *x86_emulator_cache;
 
-/*
- * When called, it means the previous get/set msr reached an invalid msr.
- * Return true if we want to ignore/silent this failed msr access.
- */
-static bool kvm_msr_ignored_check(u32 msr, u64 data, bool write)
+typedef int (*msr_access_t)(struct kvm_vcpu *vcpu, u32 index, u64 *data,
+			    bool host_initiated);
+
+static __always_inline int kvm_do_msr_access(struct kvm_vcpu *vcpu, u32 msr,
+					     u64 *data, bool host_initiated,
+					     enum kvm_msr_access rw,
+					     msr_access_t msr_access_fn)
 {
-	const char *op = write ? "wrmsr" : "rdmsr";
+	const char *op = rw == MSR_TYPE_W ? "wrmsr" : "rdmsr";
+	int ret;
+
+	BUILD_BUG_ON(rw != MSR_TYPE_R && rw != MSR_TYPE_W);
+
+	/*
+	 * Zero the data on read failures to avoid leaking stack data to the
+	 * guest and/or userspace, e.g. if the failure is ignored below.
+	 */
+	ret = msr_access_fn(vcpu, msr, data, host_initiated);
+	if (ret && rw == MSR_TYPE_R)
+		*data = 0;
+
+	if (ret != KVM_MSR_RET_UNSUPPORTED)
+		return ret;
 
-	if (ignore_msrs) {
-		if (report_ignored_msrs)
-			kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n",
-				      op, msr, data);
-		/* Mask the error */
-		return true;
-	} else {
+	if (!ignore_msrs) {
 		kvm_debug_ratelimited("unhandled %s: 0x%x data 0x%llx\n",
-				      op, msr, data);
-		return false;
+				      op, msr, *data);
+		return ret;
 	}
+
+	if (report_ignored_msrs)
+		kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n", op, msr, *data);
+
+	return 0;
 }
 
 static struct kmem_cache *kvm_alloc_emulator_cache(void)
@@ -1682,16 +1697,8 @@ static int kvm_get_feature_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
 
 static int do_get_feature_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
 {
-	int r;
-
-	/* Unconditionally clear the output for simplicity */
-	*data = 0;
-	r = kvm_get_feature_msr(vcpu, index, data, true);
-
-	if (r == KVM_MSR_RET_UNSUPPORTED && kvm_msr_ignored_check(index, 0, false))
-		r = 0;
-
-	return r;
+	return kvm_do_msr_access(vcpu, index, data, true, MSR_TYPE_R,
+				 kvm_get_feature_msr);
 }
 
 static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
@@ -1878,16 +1885,17 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
 	return kvm_x86_call(set_msr)(vcpu, &msr);
 }
 
+static int _kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
+			bool host_initiated)
+{
+	return __kvm_set_msr(vcpu, index, *data, host_initiated);
+}
+
 static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
 				     u32 index, u64 data, bool host_initiated)
 {
-	int ret = __kvm_set_msr(vcpu, index, data, host_initiated);
-
-	if (ret == KVM_MSR_RET_UNSUPPORTED)
-		if (kvm_msr_ignored_check(index, data, true))
-			ret = 0;
-
-	return ret;
+	return kvm_do_msr_access(vcpu, index, &data, host_initiated, MSR_TYPE_W,
+				 _kvm_set_msr);
 }
 
 /*
@@ -1926,16 +1934,8 @@ int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
 static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
 				     u32 index, u64 *data, bool host_initiated)
 {
-	int ret = __kvm_get_msr(vcpu, index, data, host_initiated);
-
-	if (ret == KVM_MSR_RET_UNSUPPORTED) {
-		/* Unconditionally clear *data for simplicity */
-		*data = 0;
-		if (kvm_msr_ignored_check(index, 0, false))
-			ret = 0;
-	}
-
-	return ret;
+	return kvm_do_msr_access(vcpu, index, data, host_initiated, MSR_TYPE_R,
+				 __kvm_get_msr);
 }
 
 static int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data)