Small x86 fixes.
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull more kvm fixes from Paolo Bonzini:
 "Small x86 fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: selftests: Ensure all migrations are performed when test is affined
  KVM: x86: Swap order of CPUID entry "index" vs. "significant flag" checks
  ptp: Fix ptp_kvm_getcrosststamp issue for x86 ptp_kvm
  x86/kvmclock: Move this_cpu_pvti into kvmclock.h
  selftests: KVM: Don't clobber XMM register when read
  KVM: VMX: Fix a TSX_CTRL_CPUID_CLEAR field mask issue
commit b2626f1e32
@@ -2,6 +2,20 @@
 #ifndef _ASM_X86_KVM_CLOCK_H
 #define _ASM_X86_KVM_CLOCK_H
 
+#include <linux/percpu.h>
+
 extern struct clocksource kvm_clock;
 
+DECLARE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+
+static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
+{
+	return &this_cpu_read(hv_clock_per_cpu)->pvti;
+}
+
+static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
+{
+	return this_cpu_read(hv_clock_per_cpu);
+}
+
 #endif /* _ASM_X86_KVM_CLOCK_H */
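This hunk belongs to the "x86/kvmclock: Move this_cpu_pvti into kvmclock.h" change from the shortlog above: the per-CPU pvclock accessors move into the header so code outside kvmclock.c (the x86 ptp_kvm driver further down) can read the current CPU's paravirt clock data. A minimal sketch of a consumer, assuming kernel context on x86 with kvmclock registered; the function name is hypothetical:

#include <linux/types.h>
#include <asm/kvmclock.h>

/*
 * Hypothetical consumer: fetch the current CPU's TSC-to-system-time
 * multiplier from the pvclock page KVM shares with the guest.  The
 * per-CPU pointer is NULL until kvmclock is initialized, so check the
 * hvclock pointer before dereferencing the pvti accessor.
 */
static u32 example_tsc_to_system_mul(void)
{
	if (!this_cpu_hvclock())
		return 0;

	return this_cpu_pvti()->tsc_to_system_mul;
}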
@@ -49,18 +49,9 @@ early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
 static struct pvclock_vsyscall_time_info
 			hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
 static struct pvclock_wall_clock wall_clock __bss_decrypted;
-static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
 static struct pvclock_vsyscall_time_info *hvclock_mem;
-
-static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
-{
-	return &this_cpu_read(hv_clock_per_cpu)->pvti;
-}
-
-static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
-{
-	return this_cpu_read(hv_clock_per_cpu);
-}
+DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+EXPORT_PER_CPU_SYMBOL_GPL(hv_clock_per_cpu);
 
 /*
  * The wallclock is the time of day when we booted. Since then, some time may
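The companion hunk in the kvmclock C file drops the now-duplicated helpers, un-statics the per-CPU variable, and exports it so the inline accessors in the header also resolve from modules. A generic sketch of the DECLARE/DEFINE/EXPORT split, assuming kernel context; the names are made up for illustration:

/* some_feature.h -- declaration visible to every includer */
#include <linux/percpu.h>

struct some_state;

DECLARE_PER_CPU(struct some_state *, some_state_ptr);

static inline struct some_state *this_cpu_some_state(void)
{
	return this_cpu_read(some_state_ptr);
}

/* some_feature.c -- single definition, exported for GPL modules */
#include <linux/export.h>

DEFINE_PER_CPU(struct some_state *, some_state_ptr);
EXPORT_PER_CPU_SYMBOL_GPL(some_state_ptr);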
@@ -65,8 +65,8 @@ static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
 	for (i = 0; i < nent; i++) {
 		e = &entries[i];
 
-		if (e->function == function && (e->index == index ||
-		    !(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX)))
+		if (e->function == function &&
+		    (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index))
 			return e;
 	}
 
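For the "KVM: x86: Swap order of CPUID entry ... checks" fix, the lookup now tests the significant-index flag before comparing indices, so an entry that declares its index irrelevant matches any requested index. A small userspace sketch of that predicate; the struct is simplified and the flag value mirrors the UAPI definition:

#include <stdbool.h>
#include <stdint.h>

#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX	(1u << 0)	/* per <linux/kvm.h> */

struct cpuid_entry {
	uint32_t function;
	uint32_t index;
	uint32_t flags;
};

/*
 * Matching rule from the hunk above: the entry's index only has to match
 * when the entry declares its index significant.
 */
static bool entry_matches(const struct cpuid_entry *e,
			  uint32_t function, uint32_t index)
{
	return e->function == function &&
	       (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index);
}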
@@ -6848,7 +6848,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
 		 */
 		tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
 		if (tsx_ctrl)
-			vmx->guest_uret_msrs[i].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
+			tsx_ctrl->mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
 	}
 
 	err = alloc_loaded_vmcs(&vmx->vmcs01);
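The TSX_CTRL_CPUID_CLEAR fix replaces an indexed write through vmx->guest_uret_msrs[i], where i does not identify the TSX_CTRL slot, with a write through the pointer vmx_find_uret_msr() just returned. A minimal sketch of that find-then-update-through-the-returned-pointer pattern, with hypothetical types and names:

#include <stddef.h>
#include <stdint.h>

struct uret_msr {
	uint32_t slot;
	uint64_t mask;
};

/* Hypothetical lookup mirroring the shape of vmx_find_uret_msr(). */
static struct uret_msr *find_uret_msr(struct uret_msr *msrs, size_t n, uint32_t slot)
{
	for (size_t i = 0; i < n; i++) {
		if (msrs[i].slot == slot)
			return &msrs[i];
	}
	return NULL;
}

static void clear_bit_in_mask(struct uret_msr *msrs, size_t n,
			      uint32_t slot, uint64_t bit)
{
	struct uret_msr *e = find_uret_msr(msrs, n, slot);

	/* Update the entry the lookup found, not msrs[i] for an unrelated i. */
	if (e)
		e->mask = ~bit;
}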
@@ -15,8 +15,6 @@
 #include <linux/ptp_clock_kernel.h>
 #include <linux/ptp_kvm.h>
 
-struct pvclock_vsyscall_time_info *hv_clock;
-
 static phys_addr_t clock_pair_gpa;
 static struct kvm_clock_pairing clock_pair;
 
@@ -28,8 +26,7 @@ int kvm_arch_ptp_init(void)
 		return -ENODEV;
 
 	clock_pair_gpa = slow_virt_to_phys(&clock_pair);
-	hv_clock = pvclock_get_pvti_cpu0_va();
-	if (!hv_clock)
+	if (!pvclock_get_pvti_cpu0_va())
 		return -ENODEV;
 
 	ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
@@ -64,10 +61,8 @@ int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec,
 	struct pvclock_vcpu_time_info *src;
 	unsigned int version;
 	long ret;
-	int cpu;
 
-	cpu = smp_processor_id();
-	src = &hv_clock[cpu].pvti;
+	src = this_cpu_pvti();
 
 	do {
 		/*
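With hv_clock_per_cpu exported, the "ptp: Fix ptp_kvm_getcrosststamp issue for x86 ptp_kvm" change lets the driver drop its private hv_clock pointer and read the current CPU's pvclock data via this_cpu_pvti(); the "do {" context line above is the start of the usual pvclock version loop. A minimal sketch of that read convention (an odd version means an update is in flight, a changed version means retry), assuming kernel context; arch/x86 also provides pvclock_read_begin()/pvclock_read_retry() helpers for this:

#include <asm/pvclock.h>
#include <asm/barrier.h>

/*
 * Hand-rolled pvclock seqcount read loop, for illustration only: retry
 * while an update is in progress (odd version) or the version changed
 * while the fields were being read.
 */
static u64 example_read_system_time(struct pvclock_vcpu_time_info *src)
{
	u32 version;
	u64 system_time;

	do {
		version = src->version;
		smp_rmb();		/* read fields only after the version */
		system_time = src->system_time;
		smp_rmb();		/* re-check the version after the fields */
	} while ((version & 1) || version != src->version);

	return system_time;
}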
@@ -315,7 +315,7 @@ static inline void set_xmm(int n, unsigned long val)
 #define GET_XMM(__xmm)							\
 ({									\
 	unsigned long __val;						\
-	asm volatile("movq %%"#__xmm", %0" : "=r"(__val) : : #__xmm); \
+	asm volatile("movq %%"#__xmm", %0" : "=r"(__val)); \
 	__val;								\
 })
 
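The "selftests: KVM: Don't clobber XMM register when read" fix drops the bogus clobber from GET_XMM: the asm only copies the XMM register into a general-purpose register, so telling the compiler the XMM register is clobbered was wrong. A standalone userspace sketch using the corrected macro, assuming x86-64; the main() driver is only for illustration:

#include <stdio.h>

#define GET_XMM(__xmm)							\
({									\
	unsigned long __val;						\
	asm volatile("movq %%"#__xmm", %0" : "=r"(__val));		\
	__val;								\
})

int main(void)
{
	unsigned long in = 0x1122334455667788ul;

	/* Load a known value into xmm0, then read it back with GET_XMM(). */
	asm volatile("movq %0, %%xmm0" : : "r"(in) : "xmm0");
	printf("xmm0 = 0x%lx\n", GET_XMM(xmm0));

	return 0;
}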
@@ -10,6 +10,7 @@
 #include <signal.h>
 #include <syscall.h>
 #include <sys/ioctl.h>
+#include <sys/sysinfo.h>
 #include <asm/barrier.h>
 #include <linux/atomic.h>
 #include <linux/rseq.h>
@@ -39,6 +40,7 @@ static __thread volatile struct rseq __rseq = {
 
 static pthread_t migration_thread;
 static cpu_set_t possible_mask;
+static int min_cpu, max_cpu;
 static bool done;
 
 static atomic_t seq_cnt;
@@ -57,20 +59,37 @@ static void sys_rseq(int flags)
 	TEST_ASSERT(!r, "rseq failed, errno = %d (%s)", errno, strerror(errno));
 }
 
+static int next_cpu(int cpu)
+{
+	/*
+	 * Advance to the next CPU, skipping those that weren't in the original
+	 * affinity set.  Sadly, there is no CPU_SET_FOR_EACH, and cpu_set_t's
+	 * data storage is considered opaque.  Note, if this task is pinned
+	 * to a small set of discontiguous CPUs, e.g. 2 and 1023, this loop will
+	 * burn a lot of cycles and the test will take longer than normal to
+	 * complete.
+	 */
+	do {
+		cpu++;
+		if (cpu > max_cpu) {
+			cpu = min_cpu;
+			TEST_ASSERT(CPU_ISSET(cpu, &possible_mask),
+				    "Min CPU = %d must always be usable", cpu);
+			break;
+		}
+	} while (!CPU_ISSET(cpu, &possible_mask));
+
+	return cpu;
+}
+
 static void *migration_worker(void *ign)
 {
 	cpu_set_t allowed_mask;
-	int r, i, nr_cpus, cpu;
+	int r, i, cpu;
 
 	CPU_ZERO(&allowed_mask);
 
-	nr_cpus = CPU_COUNT(&possible_mask);
-
-	for (i = 0; i < NR_TASK_MIGRATIONS; i++) {
-		cpu = i % nr_cpus;
-		if (!CPU_ISSET(cpu, &possible_mask))
-			continue;
-
+	for (i = 0, cpu = min_cpu; i < NR_TASK_MIGRATIONS; i++, cpu = next_cpu(cpu)) {
 		CPU_SET(cpu, &allowed_mask);
 
 		/*
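This is the core of "KVM: selftests: Ensure all migrations are performed when test is affined": the old "i % nr_cpus" indexing silently skipped migrations whenever the affinity mask had holes, while next_cpu() always lands on a usable CPU. A standalone userspace sketch of the same walk over a sparse mask; the CPU numbers are made up for illustration:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static cpu_set_t possible_mask;
static int min_cpu = 2, max_cpu = 9;

/* Same walk as the selftest's next_cpu(), minus the TEST_ASSERT. */
static int next_cpu(int cpu)
{
	do {
		cpu++;
		if (cpu > max_cpu) {
			cpu = min_cpu;
			break;
		}
	} while (!CPU_ISSET(cpu, &possible_mask));

	return cpu;
}

int main(void)
{
	int i, cpu;

	CPU_ZERO(&possible_mask);
	CPU_SET(2, &possible_mask);
	CPU_SET(5, &possible_mask);
	CPU_SET(9, &possible_mask);

	/* Prints CPUs 2, 5, 9, 2, 5, 9: every iteration targets a usable CPU. */
	for (i = 0, cpu = min_cpu; i < 6; i++, cpu = next_cpu(cpu))
		printf("migration %d -> CPU %d\n", i, cpu);

	return 0;
}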
@@ -154,6 +173,36 @@ static void *migration_worker(void *ign)
 	return NULL;
 }
 
+static int calc_min_max_cpu(void)
+{
+	int i, cnt, nproc;
+
+	if (CPU_COUNT(&possible_mask) < 2)
+		return -EINVAL;
+
+	/*
+	 * CPU_SET doesn't provide a FOR_EACH helper, get the min/max CPU that
+	 * this task is affined to in order to reduce the time spent querying
+	 * unusable CPUs, e.g. if this task is pinned to a small percentage of
+	 * total CPUs.
+	 */
+	nproc = get_nprocs_conf();
+	min_cpu = -1;
+	max_cpu = -1;
+	cnt = 0;
+
+	for (i = 0; i < nproc; i++) {
+		if (!CPU_ISSET(i, &possible_mask))
+			continue;
+		if (min_cpu == -1)
+			min_cpu = i;
+		max_cpu = i;
+		cnt++;
+	}
+
+	return (cnt < 2) ? -EINVAL : 0;
+}
+
 int main(int argc, char *argv[])
 {
 	int r, i, snapshot;
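calc_min_max_cpu() bounds the next_cpu() walk so the loop never scans far past the CPUs the task is actually affined to. A userspace mirror of the helper, shown only to make it runnable outside the selftest harness; the parameter names and the main() driver are illustrative:

#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <sys/sysinfo.h>

/* Record the lowest/highest CPU in @mask; fail if fewer than two are usable. */
static int calc_min_max_cpu(cpu_set_t *mask, int *min_cpu, int *max_cpu)
{
	int i, cnt = 0, nproc = get_nprocs_conf();

	*min_cpu = *max_cpu = -1;

	for (i = 0; i < nproc; i++) {
		if (!CPU_ISSET(i, mask))
			continue;
		if (*min_cpu == -1)
			*min_cpu = i;
		*max_cpu = i;
		cnt++;
	}

	return cnt < 2 ? -EINVAL : 0;
}

int main(void)
{
	cpu_set_t mask;
	int lo, hi;

	if (sched_getaffinity(0, sizeof(mask), &mask))
		return 1;

	if (calc_min_max_cpu(&mask, &lo, &hi))
		fprintf(stderr, "need at least two usable CPUs\n");
	else
		printf("affined to CPUs %d..%d\n", lo, hi);

	return 0;
}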
@@ -167,8 +216,8 @@ int main(int argc, char *argv[])
 	TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)", errno,
 		    strerror(errno));
 
-	if (CPU_COUNT(&possible_mask) < 2) {
-		print_skip("Only one CPU, task migration not possible\n");
+	if (calc_min_max_cpu()) {
+		print_skip("Only one usable CPU, task migration not possible");
 		exit(KSFT_SKIP);
 	}
 