Merge branch 'kvm-updates/3.4' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull a few KVM fixes from Avi Kivity:
 "A bunch of powerpc KVM fixes, a guest and a host RCU fix (unrelated),
  and a small build fix."

* 'kvm-updates/3.4' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: Resolve RCU vs. async page fault problem
  KVM: VMX: vmx_set_cr0 expects kvm->srcu locked
  KVM: PMU: Fix integer constant is too large warning in kvm_pmu_set_msr()
  KVM: PPC: Book3S: PR: Fix preemption
  KVM: PPC: Save/Restore CR over vcpu_run
  KVM: PPC: Book3S HV: Save and restore CR in __kvmppc_vcore_entry
  KVM: PPC: Book3S HV: Fix kvm_alloc_linear in case where no linears exist
  KVM: PPC: Book3S: Compile fix for ppc32 in HIOR access code
commit a3fac08085
@@ -173,9 +173,9 @@ static void __init kvm_linear_init_one(ulong size, int count, int type)
 
 static struct kvmppc_linear_info *kvm_alloc_linear(int type)
 {
-	struct kvmppc_linear_info *ri;
+	struct kvmppc_linear_info *ri, *ret;
 
-	ri = NULL;
+	ret = NULL;
 	spin_lock(&linear_lock);
 	list_for_each_entry(ri, &free_linears, list) {
 		if (ri->type != type)
@@ -183,11 +183,12 @@ static struct kvmppc_linear_info *kvm_alloc_linear(int type)
 
 		list_del(&ri->list);
 		atomic_inc(&ri->use_count);
+		memset(ri->base_virt, 0, ri->npages << PAGE_SHIFT);
+		ret = ri;
 		break;
 	}
 	spin_unlock(&linear_lock);
-	memset(ri->base_virt, 0, ri->npages << PAGE_SHIFT);
-	return ri;
+	return ret;
 }
 
 static void kvm_release_linear(struct kvmppc_linear_info *ri)
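The two hunks above (presumably arch/powerpc/kvm/book3s_hv_builtin.c) fix kvm_alloc_linear() for the case where no linear of the requested type is on the free list: after list_for_each_entry() finishes without a match, the cursor ri is not NULL (it points back into the list head), so the old unconditional memset() and return of ri touched garbage. The fix returns a separate ret pointer that is only set on a match and clears the buffer only in that case. A minimal user-space sketch of the same pattern, under the assumption that a tiny hand-rolled circular list is an acceptable stand-in for the kernel list API (list_head, item, find_first are hypothetical names):

#include <stddef.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel's intrusive circular list. */
struct list_head { struct list_head *next; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct item {
	int type;
	struct list_head list;
};

/* After the walk the cursor cannot tell us whether anything matched
 * (with the kernel's list_for_each_entry() it ends up pointing into
 * the list head), so a separate result pointer is used, exactly as
 * in the fixed kvm_alloc_linear(). */
static struct item *find_first(struct list_head *head, int type)
{
	struct list_head *pos;
	struct item *it, *ret = NULL;

	for (pos = head->next; pos != head; pos = pos->next) {
		it = container_of(pos, struct item, list);
		if (it->type != type)
			continue;
		ret = it;	/* only set on an actual match */
		break;
	}
	return ret;		/* NULL when nothing matched */
}

int main(void)
{
	struct list_head head;
	struct item a = { .type = 1 };

	head.next = &a.list;
	a.list.next = &head;	/* single-entry circular list */

	printf("match: %p, no match: %p\n",
	       (void *)find_first(&head, 1), (void *)find_first(&head, 2));
	return 0;
}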
@@ -46,8 +46,10 @@ _GLOBAL(__kvmppc_vcore_entry)
 	/* Save host state to the stack */
 	stdu	r1, -SWITCH_FRAME_SIZE(r1)
 
-	/* Save non-volatile registers (r14 - r31) */
+	/* Save non-volatile registers (r14 - r31) and CR */
 	SAVE_NVGPRS(r1)
+	mfcr	r3
+	std	r3, _CCR(r1)
 
 	/* Save host DSCR */
 	BEGIN_FTR_SECTION
@@ -157,8 +159,10 @@ kvmppc_handler_highmem:
 	 * R13 = PACA
 	 */
 
-	/* Restore non-volatile host registers (r14 - r31) */
+	/* Restore non-volatile host registers (r14 - r31) and CR */
 	REST_NVGPRS(r1)
+	ld	r4, _CCR(r1)
+	mtcr	r4
 
 	addi	r1, r1, SWITCH_FRAME_SIZE
 	ld	r0, PPC_LR_STKOFF(r1)
@@ -84,6 +84,10 @@ kvm_start_entry:
 	/* Save non-volatile registers (r14 - r31) */
 	SAVE_NVGPRS(r1)
 
+	/* Save CR */
+	mfcr	r14
+	stw	r14, _CCR(r1)
+
 	/* Save LR */
 	PPC_STL	r0, _LINK(r1)
 
@@ -165,6 +169,9 @@ kvm_exit_loop:
 	PPC_LL	r4, _LINK(r1)
 	mtlr	r4
 
+	lwz	r14, _CCR(r1)
+	mtcr	r14
+
 	/* Restore non-volatile host registers (r14 - r31) */
 	REST_NVGPRS(r1)
 
@@ -777,6 +777,7 @@ program_interrupt:
 		}
 	}
 
+	preempt_disable();
 	if (!(r & RESUME_HOST)) {
 		/* To avoid clobbering exit_reason, only check for signals if
 		 * we aren't already exiting to userspace for some other
@@ -798,8 +799,6 @@ program_interrupt:
 			run->exit_reason = KVM_EXIT_INTR;
 			r = -EINTR;
 		} else {
-			preempt_disable();
-
 			/* In case an interrupt came in that was triggered
 			 * from userspace (like DEC), we need to check what
 			 * to inject now! */
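The two hunks above (presumably arch/powerpc/kvm/book3s_pr.c) hoist preempt_disable() from the inner else branch to just before the if (!(r & RESUME_HOST)) test, so the code that follows sees preemption disabled on every path, not only when no signal was pending. A rough user-space analog of the structural change, using a pthread mutex purely as a stand-in for the preempt-disabled window (handle_exit and the branch bodies are invented for illustration):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t window = PTHREAD_MUTEX_INITIALIZER;

/* Fixed shape: enter the protected window before branching, so both
 * branches run inside it and the single release below covers them all.
 * The buggy shape acquired it only inside the else branch. */
static int handle_exit(int resume_host)
{
	int r;

	pthread_mutex_lock(&window);	/* was: only in the else branch */
	if (!resume_host)
		r = 1;			/* e.g. prepare to re-enter the guest */
	else
		r = -1;			/* e.g. exit to userspace */
	pthread_mutex_unlock(&window);	/* one release for every path */

	return r;
}

int main(void)
{
	printf("%d %d\n", handle_exit(0), handle_exit(1));
	return 0;
}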
@@ -881,7 +880,8 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 
 	switch (reg->id) {
 	case KVM_REG_PPC_HIOR:
-		r = put_user(to_book3s(vcpu)->hior, (u64 __user *)reg->addr);
+		r = copy_to_user((u64 __user *)(long)reg->addr,
+				 &to_book3s(vcpu)->hior, sizeof(u64));
 		break;
 	default:
 		break;
@@ -896,7 +896,8 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 
 	switch (reg->id) {
 	case KVM_REG_PPC_HIOR:
-		r = get_user(to_book3s(vcpu)->hior, (u64 __user *)reg->addr);
+		r = copy_from_user(&to_book3s(vcpu)->hior,
+				   (u64 __user *)(long)reg->addr, sizeof(u64));
 		if (!r)
 			to_book3s(vcpu)->hior_explicit = true;
 		break;
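The two hunks above switch the KVM_REG_PPC_HIOR accessors from put_user()/get_user() on a u64 to copy_to_user()/copy_from_user() with an explicit sizeof(u64). This appears to be the "Compile fix for ppc32 in HIOR access code" from the pull: the 64-bit get_user()/put_user() accesses used here do not compile on 32-bit powerpc, while an explicitly sized byte copy works on both word sizes (the extra (long) cast keeps the address conversion clean on 32-bit). A small user-space sketch of the explicitly sized copy pattern, with memcpy standing in for the user-copy helpers and all names (copy_to, copy_from, hior, user_buf) hypothetical:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-ins for copy_to_user()/copy_from_user(): an explicitly sized
 * byte copy, independent of whether the target has a native 8-byte
 * single-access primitive. Returns bytes left uncopied (always 0 here). */
static unsigned long copy_to(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static unsigned long copy_from(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

int main(void)
{
	uint64_t hior = 0, user_buf = 0x100;

	/* set_one_reg direction: pull the 64-bit value in */
	if (!copy_from(&hior, &user_buf, sizeof(uint64_t)))
		printf("hior = %#llx\n", (unsigned long long)hior);

	/* get_one_reg direction: push it back out */
	copy_to(&user_buf, &hior, sizeof(uint64_t));
	return 0;
}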
@@ -34,7 +34,8 @@
 /* r2 is special: it holds 'current', and it made nonvolatile in the
  * kernel with the -ffixed-r2 gcc option. */
 #define HOST_R2         12
-#define HOST_NV_GPRS    16
+#define HOST_CR         16
+#define HOST_NV_GPRS    20
 #define HOST_NV_GPR(n)  (HOST_NV_GPRS + ((n - 14) * 4))
 #define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
 #define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
@@ -296,8 +297,10 @@ heavyweight_exit:
 
 	/* Return to kvm_vcpu_run(). */
 	lwz	r4, HOST_STACK_LR(r1)
+	lwz	r5, HOST_CR(r1)
 	addi	r1, r1, HOST_STACK_SIZE
 	mtlr	r4
+	mtcr	r5
 	/* r3 still contains the return code from kvmppc_handle_exit(). */
 	blr
 
@@ -314,6 +317,8 @@ _GLOBAL(__kvmppc_vcpu_run)
 	stw	r3, HOST_RUN(r1)
 	mflr	r3
 	stw	r3, HOST_STACK_LR(r1)
+	mfcr	r5
+	stw	r5, HOST_CR(r1)
 
 	/* Save host non-volatile register state to stack. */
 	stw	r14, HOST_NV_GPR(r14)(r1)
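The three hunks above (presumably arch/powerpc/kvm/booke_interrupts.S) give CR its own 4-byte slot in the host stack frame: HOST_CR takes offset 16, the non-volatile GPR save area moves up to 20, CR is saved on entry to __kvmppc_vcpu_run and restored in heavyweight_exit before returning to kvm_vcpu_run(). A small host-side C check of the resulting frame offsets (the macros are copied from the patched header; the program itself is only illustrative):

#include <stdio.h>

#define HOST_R2              12
#define HOST_CR              16	/* new 4-byte CR slot */
#define HOST_NV_GPRS         20	/* pushed up from 16 to make room */
#define HOST_NV_GPR(n)       (HOST_NV_GPRS + ((n - 14) * 4))
#define HOST_MIN_STACK_SIZE  (HOST_NV_GPR(31) + 4)
#define HOST_STACK_SIZE      (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */

int main(void)
{
	printf("HOST_CR         = %d\n", HOST_CR);
	printf("HOST_NV_GPR(14) = %d\n", HOST_NV_GPR(14));
	printf("HOST_NV_GPR(31) = %d\n", HOST_NV_GPR(31));
	printf("HOST_STACK_SIZE = %d\n", HOST_STACK_SIZE);
	return 0;
}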
@@ -38,6 +38,7 @@
 #include <asm/traps.h>
 #include <asm/desc.h>
 #include <asm/tlbflush.h>
+#include <asm/idle.h>
 
 static int kvmapf = 1;
 
@@ -253,7 +254,10 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
 		kvm_async_pf_task_wait((u32)read_cr2());
 		break;
 	case KVM_PV_REASON_PAGE_READY:
+		rcu_irq_enter();
+		exit_idle();
 		kvm_async_pf_task_wake((u32)read_cr2());
+		rcu_irq_exit();
 		break;
 	}
 }
@@ -369,7 +369,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
 		if (pmu->fixed_ctr_ctrl == data)
 			return 0;
-		if (!(data & 0xfffffffffffff444)) {
+		if (!(data & 0xfffffffffffff444ull)) {
 			reprogram_fixed_counters(pmu, data);
 			return 0;
 		}
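The one-character change above appends a ull suffix to the fixed-counter control mask. On 32-bit builds, where long is 32 bits wide, older compilers warn that the unsuffixed constant is too large for its default integer type; spelling the type out silences the warning without changing the value. A tiny stand-alone illustration (mask is a hypothetical variable name):

#include <stdio.h>

int main(void)
{
	/* Same constant as in the hunk above; without the ull suffix a
	 * 32-bit build with an older gcc warns that it is too large. */
	unsigned long long mask = 0xfffffffffffff444ull;

	printf("mask = %#llx\n", mask);
	return 0;
}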
@@ -3906,7 +3906,9 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 		vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
 
 	vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
+	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 	vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */
+	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 	vmx_set_cr4(&vmx->vcpu, 0);
 	vmx_set_efer(&vmx->vcpu, 0);
 	vmx_fpu_activate(&vmx->vcpu);