Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
"One of the largest releases for KVM... Hardly any generic
changes, but lots of architecture-specific updates.
ARM:
- VHE support so that we can run the kernel at EL2 on ARMv8.1 systems
- PMU support for guests
- 32bit world switch rewritten in C
- various optimizations to the vgic save/restore code.
PPC:
- enabled KVM-VFIO integration ("VFIO device")
- optimizations to speed up IPIs between vcpus
- in-kernel handling of IOMMU hypercalls
- support for dynamic DMA windows (DDW).
s390:
- provide the floating point registers via sync regs;
- separated instruction vs. data accesses
- dirty log improvements for huge guests
- bugfixes and documentation improvements.
x86:
- Hyper-V VMBus hypercall userspace exit
- alternative implementation of lowest-priority interrupts using
vector hashing (for better VT-d posted interrupt support)
- fixed guest debugging with nested virtualizations
- improved interrupt tracking in the in-kernel IOAPIC
- generic infrastructure for tracking writes to guest
memory - currently its only use is to speed up the legacy shadow
paging (pre-EPT) case, but in the future it will be used for
virtual GPUs as well
- much cleanup (LAPIC, kvmclock, MMU, PIT), including ubsan fixes"
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (217 commits)
KVM: x86: remove eager_fpu field of struct kvm_vcpu_arch
KVM: x86: disable MPX if host did not enable MPX XSAVE features
arm64: KVM: vgic-v3: Only wipe LRs on vcpu exit
arm64: KVM: vgic-v3: Reset LRs at boot time
arm64: KVM: vgic-v3: Do not save an LR known to be empty
arm64: KVM: vgic-v3: Save maintenance interrupt state only if required
arm64: KVM: vgic-v3: Avoid accessing ICH registers
KVM: arm/arm64: vgic-v2: Make GICD_SGIR quicker to hit
KVM: arm/arm64: vgic-v2: Only wipe LRs on vcpu exit
KVM: arm/arm64: vgic-v2: Reset LRs at boot time
KVM: arm/arm64: vgic-v2: Do not save an LR known to be empty
KVM: arm/arm64: vgic-v2: Move GICH_ELRSR saving to its own function
KVM: arm/arm64: vgic-v2: Save maintenance interrupt state only if required
KVM: arm/arm64: vgic-v2: Avoid accessing GICH registers
KVM: s390: allocate only one DMA page per VM
KVM: s390: enable STFLE interpretation only if enabled for the guest
KVM: s390: wake up when the VCPU cpu timer expires
KVM: s390: step the VCPU timer while in enabled wait
KVM: s390: protect VCPU cpu timer with a seqcount
KVM: s390: step VCPU cpu timer during kvm_run ioctl
...
@@ -206,7 +206,7 @@ int smp_request_message_ipi(int virq, int msg)
 
 #ifdef CONFIG_PPC_SMP_MUXED_IPI
 struct cpu_messages {
-	int messages;			/* current messages */
+	long messages;			/* current messages */
 	unsigned long data;		/* data for cause ipi */
 };
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
@@ -218,7 +218,7 @@ void smp_muxed_ipi_set_data(int cpu, unsigned long data)
 	info->data = data;
 }
 
-void smp_muxed_ipi_message_pass(int cpu, int msg)
+void smp_muxed_ipi_set_message(int cpu, int msg)
 {
 	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
 	char *message = (char *)&info->messages;
@@ -228,6 +228,13 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
 	 */
 	smp_mb();
 	message[msg] = 1;
+}
+
+void smp_muxed_ipi_message_pass(int cpu, int msg)
+{
+	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
+
+	smp_muxed_ipi_set_message(cpu, msg);
 	/*
 	 * cause_ipi functions are required to include a full barrier
 	 * before doing whatever causes the IPI.
@@ -236,20 +243,31 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
 }
 
 #ifdef __BIG_ENDIAN__
-#define IPI_MESSAGE(A) (1 << (24 - 8 * (A)))
+#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
 #else
-#define IPI_MESSAGE(A) (1 << (8 * (A)))
+#define IPI_MESSAGE(A) (1uL << (8 * (A)))
 #endif
 
 irqreturn_t smp_ipi_demux(void)
 {
 	struct cpu_messages *info = this_cpu_ptr(&ipi_message);
-	unsigned int all;
+	unsigned long all;
 
 	mb();	/* order any irq clear */
 
 	do {
 		all = xchg(&info->messages, 0);
+#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
+		/*
+		 * Must check for PPC_MSG_RM_HOST_ACTION messages
+		 * before PPC_MSG_CALL_FUNCTION messages because when
+		 * a VM is destroyed, we call kick_all_cpus_sync()
+		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
+		 * messages have completed before we free any VCPUs.
+		 */
+		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
+			kvmppc_xics_ipi_action();
+#endif
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
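
The hunks above widen the per-CPU message word from int to long, split the byte-store step out into smp_muxed_ipi_set_message(), and add handling of PPC_MSG_RM_HOST_ACTION to the demux loop. The encoding is simple: each IPI message type owns one byte of the messages word, a sender sets its byte, and the receiver drains all pending messages with a single atomic xchg() and tests the IPI_MESSAGE() bits. Below is a minimal standalone sketch of that encoding, assuming GCC/Clang predefined macros and __atomic builtins; the MSG_* names and helper functions are illustrative stand-ins, not the kernel's API.

/*
 * Userspace illustration of the muxed-IPI encoding shown in the diff:
 * one byte per message type inside a single long word, drained with an
 * atomic exchange.  MSG_* values and the __atomic builtin are stand-ins
 * for the kernel's PPC_MSG_* constants, xchg() and smp_mb().
 */
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

enum { MSG_CALL_FUNCTION = 0, MSG_RESCHEDULE = 1 };	/* hypothetical */

/* Same shape as the kernel macro after the change: one byte per message. */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define IPI_MESSAGE(A)	(1UL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A)	(1UL << (8 * (A)))
#endif

static unsigned long pending;		/* stand-in for cpu_messages.messages */

static void set_message(int msg)
{
	char *message = (char *)&pending;

	/* The sender marks one byte; the kernel orders this with smp_mb(). */
	message[msg] = 1;
}

static void demux(void)
{
	/* Grab and clear everything at once, like xchg(&info->messages, 0). */
	unsigned long all = __atomic_exchange_n(&pending, 0UL, __ATOMIC_SEQ_CST);

	if (all & IPI_MESSAGE(MSG_CALL_FUNCTION))
		printf("call-function message pending\n");
	if (all & IPI_MESSAGE(MSG_RESCHEDULE))
		printf("reschedule message pending\n");
}

int main(void)
{
	set_message(MSG_RESCHEDULE);
	set_message(MSG_CALL_FUNCTION);
	demux();	/* prints both lines; pending is zero again afterwards */
	return 0;
}

Widening messages from int to long (and switching IPI_MESSAGE() to 1uL with BITS_PER_LONG) makes room for more than four message bytes on 64-bit, which the new PPC_MSG_RM_HOST_ACTION message relies on.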