KVM: x86/mmu: Add "never" option to allow sticky disabling of nx_huge_pages

Add a "never" option to the nx_huge_pages module param to allow userspace
to do a one-way hard disabling of the mitigation, and don't create the
per-VM recovery threads when the mitigation is hard disabled.  Letting
userspace pinky swear that it will never enable the NX mitigation
(without reloading KVM) allows certain use cases to avoid the latency
problems associated with spawning a kthread for each VM.

E.g. in FaaS use cases, the guest kernel is trusted and the host may
create 100+ VMs per logical CPU, which can result in 100ms+ latencies when
a burst of VMs is created.
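
For reference, a minimal userspace sketch of the intended flow.  Not
part of this patch: the sysfs path below is simply the standard module
params location, and the error handling is illustrative.

  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
          /* Standard sysfs location for KVM module params. */
          int fd = open("/sys/module/kvm/parameters/nx_huge_pages", O_WRONLY);

          if (fd < 0) {
                  perror("open");
                  return 1;
          }

          /*
           * Must happen before any VM exists: the setter returns -EBUSY
           * if vm_list is non-empty, and -EPERM for any write after the
           * mitigation has been hard disabled.
           */
          if (write(fd, "never", 5) < 0)
                  perror("nx_huge_pages");

          close(fd);
          return 0;
  }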

Reported-by: Li RongQing <lirongqing@baidu.com>
Closes: https://lore.kernel.org/all/1679555884-32544-1-git-send-email-lirongqing@baidu.com
Cc: Yong He <zhuangel570@gmail.com>
Cc: Robert Hoo <robert.hoo.linux@gmail.com>
Cc: Kai Huang <kai.huang@intel.com>
Reviewed-by: Robert Hoo <robert.hoo.linux@gmail.com>
Acked-by: Kai Huang <kai.huang@intel.com>
Tested-by: Luiz Capitulino <luizcap@amazon.com>
Reviewed-by: Li RongQing <lirongqing@baidu.com>
Link: https://lore.kernel.org/r/20230602005859.784190-1-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c

@@ -58,6 +58,8 @@
 
 extern bool itlb_multihit_kvm_mitigation;
 
+static bool nx_hugepage_mitigation_hard_disabled;
+
 int __read_mostly nx_huge_pages = -1;
 static uint __read_mostly nx_huge_pages_recovery_period_ms;
 #ifdef CONFIG_PREEMPT_RT
@@ -67,12 +69,13 @@ static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
 static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
 #endif
 
+static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp);
 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
 static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp);
 
 static const struct kernel_param_ops nx_huge_pages_ops = {
	.set = set_nx_huge_pages,
-	.get = param_get_bool,
+	.get = get_nx_huge_pages,
 };
 
 static const struct kernel_param_ops nx_huge_pages_recovery_param_ops = {
@@ -6852,6 +6855,14 @@ static void mmu_destroy_caches(void)
	kmem_cache_destroy(mmu_page_header_cache);
 }
 
+static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp)
+{
+	if (nx_hugepage_mitigation_hard_disabled)
+		return sprintf(buffer, "never\n");
+
+	return param_get_bool(buffer, kp);
+}
+
 static bool get_nx_auto_mode(void)
 {
	/* Return true when CPU has the bug, and mitigations are ON */
@@ -6868,15 +6879,29 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
	bool old_val = nx_huge_pages;
	bool new_val;
 
+	if (nx_hugepage_mitigation_hard_disabled)
+		return -EPERM;
+
	/* In "auto" mode deploy workaround only if CPU has the bug. */
-	if (sysfs_streq(val, "off"))
+	if (sysfs_streq(val, "off")) {
		new_val = 0;
-	else if (sysfs_streq(val, "force"))
+	} else if (sysfs_streq(val, "force")) {
		new_val = 1;
-	else if (sysfs_streq(val, "auto"))
+	} else if (sysfs_streq(val, "auto")) {
		new_val = get_nx_auto_mode();
-	else if (kstrtobool(val, &new_val) < 0)
+	} else if (sysfs_streq(val, "never")) {
+		new_val = 0;
+
+		mutex_lock(&kvm_lock);
+		if (!list_empty(&vm_list)) {
+			mutex_unlock(&kvm_lock);
+			return -EBUSY;
+		}
+		nx_hugepage_mitigation_hard_disabled = true;
+		mutex_unlock(&kvm_lock);
+	} else if (kstrtobool(val, &new_val) < 0) {
		return -EINVAL;
+	}
 
	__set_nx_huge_pages(new_val);
@@ -7014,6 +7039,9 @@ static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp)
	uint old_period, new_period;
	int err;
 
+	if (nx_hugepage_mitigation_hard_disabled)
+		return -EPERM;
+
	was_recovery_enabled = calc_nx_huge_pages_recovery_period(&old_period);
 
	err = param_set_uint(val, kp);
@@ -7169,6 +7197,9 @@ int kvm_mmu_post_init_vm(struct kvm *kvm)
 {
	int err;
 
+	if (nx_hugepage_mitigation_hard_disabled)
+		return 0;
+
	err = kvm_vm_create_worker_thread(kvm, kvm_nx_huge_page_recovery_worker, 0,
					  "kvm-nx-lpage-recovery",
					  &kvm->arch.nx_huge_page_recovery_thread);
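
Once hard disabled, reads of the param report "never" via the new
getter.  A minimal userspace check, again assuming the standard sysfs
path (not part of this patch):

  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
          char buf[16] = "";
          FILE *f = fopen("/sys/module/kvm/parameters/nx_huge_pages", "r");

          if (!f)
                  return 1;

          if (!fgets(buf, sizeof(buf), f))
                  buf[0] = '\0';
          fclose(f);

          /*
           * get_nx_huge_pages() emits "never\n" once hard disabled;
           * otherwise param_get_bool() reports Y or N.
           */
          return strncmp(buf, "never", 5) != 0;
  }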