Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "Generic:

   - Set .owner for various KVM file_operations so that files refcount
     the KVM module until KVM is done executing _all_ code, including
     the last few instructions of kvm_put_kvm(). And then revert the
     misguided attempt to rely on "struct kvm" refcounts to pin
     KVM-the-module.

  ARM:

   - Do not redo the mapping of vLPIs, if they have already been mapped

  s390:

   - Do not leave bits behind in PTEs

   - Properly catch page invalidations that affect the prefix of a
     nested guest

  x86:

   - When checking if a _running_ vCPU is "in-kernel", i.e. running at
     CPL0, get the CPL directly instead of relying on
     preempted_in_kernel (which is valid if and only if the vCPU was
     preempted, i.e. NOT running).

   - Fix a benign "return void" that was recently introduced.

  Selftests:

   - Makefile tweak for dependency generation

   - '-Wformat' fix"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: SVM: Update EFER software model on CR0 trap for SEV-ES
  KVM: selftests: add -MP to CFLAGS
  KVM: selftests: Actually print out magic token in NX hugepages skip message
  KVM: x86: Remove 'return void' expression for 'void function'
  Revert "KVM: Prevent module exit until all VMs are freed"
  KVM: Set file_operations.owner appropriately for all such structures
  KVM: x86: Get CPL directly when checking if loaded vCPU is in kernel mode
  KVM: arm64: GICv4: Do not perform a map to a mapped vLPI
  KVM: s390/mm: Properly reset no-dat
  KVM: s390: vsie: fix wrong VIR 37 when MSO is used
commit 0aea22c7ab
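The "Generic" item above refers to the standard file_operations.owner mechanism: when .owner is set, the VFS holds a reference on that module for every open file backed by those operations, so the module's code cannot be unloaded while any such fd, including its ->release path, may still run. The following is a minimal, hypothetical misc-device module (not KVM code) sketching the pattern; the device name and function names are made up for illustration.

#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

static int demo_open(struct inode *inode, struct file *file)
{
	return 0;
}

static int demo_release(struct inode *inode, struct file *file)
{
	/* Still module code: .owner keeps the module pinned until fput() finishes. */
	return 0;
}

static const struct file_operations demo_fops = {
	.owner   = THIS_MODULE,	/* every fd on this device refcounts the module */
	.open    = demo_open,
	.release = demo_release,
};

static struct miscdevice demo_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name  = "owner-demo",	/* hypothetical device name */
	.fops  = &demo_fops,
};

module_misc_device(demo_dev);
MODULE_LICENSE("GPL");

Without .owner, the module could be removed while files are still open, and the eventual release calls would jump into freed module text.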
@@ -436,6 +436,10 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
 	if (ret)
 		goto out;
 
+	/* Silently exit if the vLPI is already mapped */
+	if (irq->hw)
+		goto out;
+
 	/*
 	 * Emit the mapping request. If it fails, the ITS probably
 	 * isn't v4 compatible, so let's silently bail out. Holding
@@ -587,10 +587,6 @@ void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
 
 	if (!gmap_is_shadow(gmap))
 		return;
-	if (start >= 1UL << 31)
-		/* We are only interested in prefix pages */
-		return;
-
 	/*
 	 * Only new shadow blocks are added to the list during runtime,
 	 * therefore we can safely reference them all the time.
@@ -756,7 +756,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
 		pte_clear(mm, addr, ptep);
 	}
 	if (reset)
-		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
+		pgste_val(pgste) &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT);
 	pgste_set_unlock(ptep, pgste);
 	preempt_enable();
 }
@@ -182,6 +182,7 @@ static int kvm_mmu_rmaps_stat_release(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations mmu_rmaps_stat_fops = {
+	.owner = THIS_MODULE,
 	.open = kvm_mmu_rmaps_stat_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -1855,15 +1855,17 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	bool old_paging = is_paging(vcpu);
 
 #ifdef CONFIG_X86_64
-	if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) {
+	if (vcpu->arch.efer & EFER_LME) {
 		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
 			vcpu->arch.efer |= EFER_LMA;
-			svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
+			if (!vcpu->arch.guest_state_protected)
+				svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
 		}
 
 		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
 			vcpu->arch.efer &= ~EFER_LMA;
-			svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
+			if (!vcpu->arch.guest_state_protected)
+				svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
 		}
 	}
 #endif
@@ -5518,8 +5518,8 @@ static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu,
 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
 					 struct kvm_xsave *guest_xsave)
 {
-	return kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region,
-					     sizeof(guest_xsave->region));
+	kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region,
+				      sizeof(guest_xsave->region));
 }
 
 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
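The hunk above is the "benign return void" fix: kvm_vcpu_ioctl_x86_get_xsave2() itself returns void, so returning its "value" changes nothing at runtime, but ISO C does not allow a return statement with an expression in a function whose return type is void (compilers typically accept it quietly and only warn under -Wpedantic). A tiny, hypothetical standalone illustration, with made-up helper names:

#include <stdio.h>

static void log_msg(const char *msg)
{
	printf("%s\n", msg);
}

/* Non-conforming style: "return expr;" in a void function, even though
 * the expression itself has void type. Harmless in practice, hence "benign". */
static void notify_old(const char *msg)
{
	return log_msg(msg);
}

/* Conforming style: just call the helper and fall off the end. */
static void notify_new(const char *msg)
{
	log_msg(msg);
}

int main(void)
{
	notify_old("old style");
	notify_new("new style");
	return 0;
}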
@@ -13031,7 +13031,10 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.guest_state_protected)
 		return true;
 
-	return vcpu->arch.preempted_in_kernel;
+	if (vcpu != kvm_get_running_vcpu())
+		return vcpu->arch.preempted_in_kernel;
+
+	return static_call(kvm_x86_get_cpl)(vcpu) == 0;
 }
 
 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
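The hunk above only trusts preempted_in_kernel for a vCPU that is not currently loaded on this CPU; for the running vCPU it queries the CPL directly. The same "live value for the current object, snapshot for everyone else" shape, as a hypothetical standalone C sketch with invented names:

#include <stdbool.h>
#include <stdio.h>

struct task {
	bool busy_at_preempt;	/* recorded when the task was switched out */
};

static struct task *current_task;	/* stands in for "the one running now" */

/* Pretend this reads live state that only exists for the running task. */
static bool read_live_busy(void)
{
	return true;
}

static bool task_is_busy(struct task *t)
{
	if (t != current_task)
		return t->busy_at_preempt;	/* not running: the snapshot is all we have */
	return read_live_busy();		/* running: the snapshot may be stale, ask directly */
}

int main(void)
{
	struct task a = { .busy_at_preempt = false };
	struct task b = { .busy_at_preempt = true };

	current_task = &a;
	printf("a busy: %d, b busy: %d\n", task_is_busy(&a), task_is_busy(&b));
	return 0;
}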
@@ -224,7 +224,7 @@ else
 LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include
 endif
 CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
-	-Wno-gnu-variable-sized-type-not-at-end -MD\
+	-Wno-gnu-variable-sized-type-not-at-end -MD -MP \
 	-fno-builtin-memcmp -fno-builtin-memcpy -fno-builtin-memset \
 	-fno-builtin-strnlen \
 	-fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
@@ -259,7 +259,7 @@ int main(int argc, char **argv)
 	__TEST_REQUIRE(token == MAGIC_TOKEN,
 		       "This test must be run with the magic token %d.\n"
 		       "This is done by nx_huge_pages_test.sh, which\n"
-		       "also handles environment setup for the test.");
+		       "also handles environment setup for the test.", MAGIC_TOKEN);
 
 	run_test(reclaim_period_ms, false, reboot_permissions);
 	run_test(reclaim_period_ms, true, reboot_permissions);
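The selftest hunk above is the '-Wformat' fix: the format string consumes a %d, so MAGIC_TOKEN must be passed as a trailing argument or the skip message prints garbage. A standalone, hypothetical example of the bug class that -Wformat (pulled in by -Wall) catches, using a made-up token value:

/* Build with: cc -Wall -Wformat demo.c */
#include <stdio.h>

#define MAGIC_TOKEN 42	/* hypothetical value, for illustration only */

int main(void)
{
	/* Broken: "%d" has no matching argument; -Wformat warns, and the
	 * printed number is whatever happens to sit in the vararg slot. */
	printf("This test must be run with the magic token %d.\n");

	/* Fixed: the token is passed explicitly. */
	printf("This test must be run with the magic token %d.\n", MAGIC_TOKEN);
	return 0;
}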
@@ -115,8 +115,6 @@ EXPORT_SYMBOL_GPL(kvm_debugfs_dir);
 
 static const struct file_operations stat_fops_per_vm;
 
-static struct file_operations kvm_chardev_ops;
-
 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
 			   unsigned long arg);
 #ifdef CONFIG_KVM_COMPAT
@@ -1157,9 +1155,6 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
 	if (!kvm)
 		return ERR_PTR(-ENOMEM);
 
-	/* KVM is pinned via open("/dev/kvm"), the fd passed to this ioctl(). */
-	__module_get(kvm_chardev_ops.owner);
-
 	KVM_MMU_LOCK_INIT(kvm);
 	mmgrab(current->mm);
 	kvm->mm = current->mm;
@@ -1279,7 +1274,6 @@ out_err_no_irq_srcu:
 out_err_no_srcu:
 	kvm_arch_free_vm(kvm);
 	mmdrop(current->mm);
-	module_put(kvm_chardev_ops.owner);
 	return ERR_PTR(r);
 }
 
@@ -1348,7 +1342,6 @@ static void kvm_destroy_vm(struct kvm *kvm)
 	preempt_notifier_dec();
 	hardware_disable_all();
 	mmdrop(mm);
-	module_put(kvm_chardev_ops.owner);
 }
 
 void kvm_get_kvm(struct kvm *kvm)
@@ -3887,7 +3880,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
 	return 0;
 }
 
-static const struct file_operations kvm_vcpu_fops = {
+static struct file_operations kvm_vcpu_fops = {
 	.release = kvm_vcpu_release,
 	.unlocked_ioctl = kvm_vcpu_ioctl,
 	.mmap = kvm_vcpu_mmap,
@@ -4081,6 +4074,7 @@ static int kvm_vcpu_stats_release(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations kvm_vcpu_stats_fops = {
+	.owner = THIS_MODULE,
 	.read = kvm_vcpu_stats_read,
 	.release = kvm_vcpu_stats_release,
 	.llseek = noop_llseek,
@@ -4431,7 +4425,7 @@ static int kvm_device_release(struct inode *inode, struct file *filp)
 	return 0;
 }
 
-static const struct file_operations kvm_device_fops = {
+static struct file_operations kvm_device_fops = {
 	.unlocked_ioctl = kvm_device_ioctl,
 	.release = kvm_device_release,
 	KVM_COMPAT(kvm_device_ioctl),
@@ -4759,6 +4753,7 @@ static int kvm_vm_stats_release(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations kvm_vm_stats_fops = {
+	.owner = THIS_MODULE,
 	.read = kvm_vm_stats_read,
 	.release = kvm_vm_stats_release,
 	.llseek = noop_llseek,
@@ -5060,7 +5055,7 @@ static long kvm_vm_compat_ioctl(struct file *filp,
 }
 #endif
 
-static const struct file_operations kvm_vm_fops = {
+static struct file_operations kvm_vm_fops = {
 	.release = kvm_vm_release,
 	.unlocked_ioctl = kvm_vm_ioctl,
 	.llseek = noop_llseek,
@@ -6095,6 +6090,9 @@ int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
 		goto err_async_pf;
 
 	kvm_chardev_ops.owner = module;
+	kvm_vm_fops.owner = module;
+	kvm_vcpu_fops.owner = module;
+	kvm_device_fops.owner = module;
 
 	kvm_preempt_ops.sched_in = kvm_sched_in;
 	kvm_preempt_ops.sched_out = kvm_sched_out;
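The final hunk also explains why kvm_vm_fops, kvm_vcpu_fops and kvm_device_fops lose their const in the earlier hunks: kvm_init() assigns .owner at runtime from the module argument it receives, mirroring the existing kvm_chardev_ops.owner = module line kept as context, so the resulting fds pin whichever module the caller passes in rather than a compile-time THIS_MODULE. A hypothetical, non-KVM sketch of that init-time ownership hand-off, with invented names:

#include <linux/fs.h>
#include <linux/module.h>

/* Not const: .owner is filled in at init time rather than at build time. */
static struct file_operations demo_lib_fops = {
	.llseek = noop_llseek,
};

/*
 * Library-style init: the caller passes its own module, and every fd later
 * created with demo_lib_fops holds a reference on that caller while open.
 */
int demo_lib_init(struct module *owner)
{
	demo_lib_fops.owner = owner;
	return 0;
}
EXPORT_SYMBOL_GPL(demo_lib_init);

A consumer module would call demo_lib_init(THIS_MODULE) from its own module_init() path, just as an arch module is expected to hand its module to kvm_init() here.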