Mirror of https://github.com/torvalds/linux.git
Merge git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm powerpc fixes from Marcelo Tosatti:
 "Urgent KVM PPC updates, quoting Alexander Graf:

    There are a few bugs in 3.4 that really should be fixed before
    people can be all happy and fuzzy about KVM on PowerPC. These
    fixes are:

     * fix POWER7 bare metal with PR=y
     * fix deadlock on HV=y book3s_64 mode in low memory cases
     * fix invalid MMU scope of PR=y mode on book3s_64, possibly
       leading to memory corruption"

* git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: PPC: Book3S HV: Fix bug leading to deadlock in guest HPT updates
  powerpc/kvm: Fix VSID usage in 64-bit "PR" KVM
  KVM: PPC: Book3S: PR: Fix hsrr code
  KVM: PPC: Fix PR KVM on POWER7 bare metal
  KVM: PPC: Book3S: PR: Handle EMUL_ASSIST
commit 0e93b4b304
arch/powerpc/include/asm/kvm_book3s.h

@@ -81,12 +81,13 @@ struct kvmppc_vcpu_book3s {
 	u64 sdr1;
 	u64 hior;
 	u64 msr_mask;
-	u64 vsid_next;
 #ifdef CONFIG_PPC_BOOK3S_32
 	u32 vsid_pool[VSID_POOL_SIZE];
+	u32 vsid_next;
 #else
-	u64 vsid_first;
-	u64 vsid_max;
+	u64 proto_vsid_first;
+	u64 proto_vsid_max;
+	u64 proto_vsid_next;
 #endif
 	int context_id[SID_CONTEXTS];
arch/powerpc/kvm/book3s_64_mmu_host.c

@@ -194,14 +194,14 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
 	backwards_map = !backwards_map;

 	/* Uh-oh ... out of mappings. Let's flush! */
-	if (vcpu_book3s->vsid_next == vcpu_book3s->vsid_max) {
-		vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
+	if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) {
+		vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first;
 		memset(vcpu_book3s->sid_map, 0,
 		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
 		kvmppc_mmu_pte_flush(vcpu, 0, 0);
 		kvmppc_mmu_flush_segments(vcpu);
 	}
-	map->host_vsid = vcpu_book3s->vsid_next++;
+	map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++, 256M);

 	map->guest_vsid = gvsid;
 	map->valid = true;
@@ -319,9 +319,10 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
 		return -1;
 	vcpu3s->context_id[0] = err;

-	vcpu3s->vsid_max = ((vcpu3s->context_id[0] + 1) << USER_ESID_BITS) - 1;
-	vcpu3s->vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
-	vcpu3s->vsid_next = vcpu3s->vsid_first;
+	vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1)
+				  << USER_ESID_BITS) - 1;
+	vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
+	vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;

 	kvmppc_mmu_hpte_init(vcpu);
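The three hunks above are the VSID fix: instead of handing out raw host VSIDs (which let the shadow MMU wander outside the VSID space reserved for its context id), the allocator now hands out proto-VSIDs from the range belonging to its context and scrambles them only at the point of use. A minimal C sketch of that allocation pattern follows; the constant values, the scramble() formula and the struct names here are simplified stand-ins for illustration, not the kernel's definitions (the kernel uses its vsid_scramble() macro and also flushes shadow PTEs and segments when the range wraps).

#include <stdint.h>
#include <string.h>

#define USER_ESID_BITS	16		/* assumed width, illustration only */
#define SID_MAP_NUM	512		/* assumed map size, illustration only */

struct sid_map { uint64_t guest_vsid, host_vsid; int valid; };

struct vsid_alloc {
	uint64_t proto_vsid_first;	/* start of this VM's proto-VSID range */
	uint64_t proto_vsid_max;	/* last proto-VSID in the range */
	uint64_t proto_vsid_next;	/* next proto-VSID to hand out */
	struct sid_map sid_map[SID_MAP_NUM];
};

/* Stand-in for the kernel's vsid_scramble(): any fixed multiplicative
 * hash into the hardware VSID space serves the illustration. */
static uint64_t scramble(uint64_t proto_vsid)
{
	return (proto_vsid * 0x00DF1955ULL) & ((1ULL << 36) - 1);
}

/* Range derived from the context id, as in kvmppc_mmu_init() above. */
static void vsid_alloc_init(struct vsid_alloc *a, uint64_t context_id)
{
	a->proto_vsid_first = context_id << USER_ESID_BITS;
	a->proto_vsid_max   = ((context_id + 1) << USER_ESID_BITS) - 1;
	a->proto_vsid_next  = a->proto_vsid_first;
}

/* Proto-VSIDs are consumed linearly and scrambled only when used, so
 * the allocator can never step outside the range the host reserved
 * for this context id. */
static uint64_t vsid_alloc_next(struct vsid_alloc *a)
{
	if (a->proto_vsid_next == a->proto_vsid_max) {
		/* range exhausted: forget every mapping and start over
		 * (the kernel also flushes shadow PTEs and segments) */
		a->proto_vsid_next = a->proto_vsid_first;
		memset(a->sid_map, 0, sizeof(a->sid_map));
	}
	return scramble(a->proto_vsid_next++);
}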
arch/powerpc/kvm/book3s_hv_rm_mmu.c

@@ -463,6 +463,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
 			/* insert R and C bits from PTE */
 			rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
 			args[j] |= rcbits << (56 - 5);
+			hp[0] = 0;
 			continue;
 		}
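Why the single added store matters may not be obvious from the hunk alone: in the HV guest-HPT code, the per-entry lock is a software bit kept in the same doubleword that the hunk writes, so any path that takes the lock and then continues early must store that doubleword back, or the next update of the same entry spins forever. The sketch below is illustrative only; try_lock_hpte() and HPTE_V_HVLOCK are modelled on the real names, but the bit value and the function bodies are simplified assumptions.

#include <stdint.h>
#include <stdbool.h>

#define HPTE_V_HVLOCK	(1ULL << 1)	/* assumed bit position, illustration only */

/* Take the per-entry lock by atomically setting the lock bit in hp[0]. */
static bool try_lock_hpte(uint64_t *hp)
{
	uint64_t old = __atomic_load_n(&hp[0], __ATOMIC_RELAXED);

	if (old & HPTE_V_HVLOCK)
		return false;
	return __atomic_compare_exchange_n(&hp[0], &old,
					   old | HPTE_V_HVLOCK, false,
					   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

static void bulk_remove_one(uint64_t *hp, bool hardware_mapped)
{
	while (!try_lock_hpte(hp))
		;			/* spin: the lock is per entry */

	if (!hardware_mapped) {
		/*
		 * The entry only exists in the guest HPT; there is no
		 * hardware mapping to tear down.  Clearing the first
		 * doubleword both removes the entry and drops the lock
		 * bit.  Skipping this store is the deadlock the patch
		 * fixes.
		 */
		hp[0] = 0;
		return;
	}

	/* ... invalidate the hardware mapping, then release ... */
	hp[0] = 0;
}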
arch/powerpc/kvm/book3s_segment.S

@@ -197,7 +197,8 @@ kvmppc_interrupt:
 	/* Save guest PC and MSR */
 #ifdef CONFIG_PPC64
 BEGIN_FTR_SECTION
-	andi.	r0,r12,0x2
+	andi.	r0, r12, 0x2
+	cmpwi	cr1, r0, 0
 	beq	1f
 	mfspr	r3,SPRN_HSRR0
 	mfspr	r4,SPRN_HSRR1
@@ -250,6 +251,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
 	beq	ld_last_prev_inst
 	cmpwi	r12, BOOK3S_INTERRUPT_ALIGNMENT
 	beq-	ld_last_inst
+#ifdef CONFIG_PPC64
+BEGIN_FTR_SECTION
+	cmpwi	r12, BOOK3S_INTERRUPT_H_EMUL_ASSIST
+	beq-	ld_last_inst
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+#endif

 	b	no_ld_last_inst
@@ -316,23 +323,17 @@ no_dcbz32_off:
 	 * Having set up SRR0/1 with the address where we want
 	 * to continue with relocation on (potentially in module
 	 * space), we either just go straight there with rfi[d],
-	 * or we jump to an interrupt handler with bctr if there
-	 * is an interrupt to be handled first. In the latter
-	 * case, the rfi[d] at the end of the interrupt handler
-	 * will get us back to where we want to continue.
+	 * or we jump to an interrupt handler if there is an
+	 * interrupt to be handled first. In the latter case,
+	 * the rfi[d] at the end of the interrupt handler will
+	 * get us back to where we want to continue.
 	 */

-	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
-	beq	1f
-	cmpwi	r12, BOOK3S_INTERRUPT_DECREMENTER
-	beq	1f
-	cmpwi	r12, BOOK3S_INTERRUPT_PERFMON
-1:	mtctr	r12
-
 	/* Register usage at this point:
 	 *
 	 * R1       = host R1
 	 * R2       = host R2
+	 * R10      = raw exit handler id
 	 * R12      = exit handler id
 	 * R13      = shadow vcpu (32-bit) or PACA (64-bit)
 	 * SVCPU.*  = guest *
@@ -342,12 +343,25 @@ no_dcbz32_off:
 	PPC_LL	r6, HSTATE_HOST_MSR(r13)
 	PPC_LL	r8, HSTATE_VMHANDLER(r13)

-	/* Restore host msr -> SRR1 */
+#ifdef CONFIG_PPC64
+BEGIN_FTR_SECTION
+	beq	cr1, 1f
+	mtspr	SPRN_HSRR1, r6
+	mtspr	SPRN_HSRR0, r8
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+#endif
+
+1:	/* Restore host msr -> SRR1 */
 	mtsrr1	r6
 	/* Load highmem handler address */
 	mtsrr0	r8

 	/* RFI into the highmem handler, or jump to interrupt handler */
-	beqctr
+	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
+	beqa	BOOK3S_INTERRUPT_EXTERNAL
+	cmpwi	r12, BOOK3S_INTERRUPT_DECREMENTER
+	beqa	BOOK3S_INTERRUPT_DECREMENTER
+	cmpwi	r12, BOOK3S_INTERRUPT_PERFMON
+	beqa	BOOK3S_INTERRUPT_PERFMON

 	RFI
 kvmppc_handler_trampoline_exit_end:
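For readers not fluent in the assembly, a rough C model of what the book3s_segment.S hunks do may help: at entry the new "andi. r0, r12, 0x2 ; cmpwi cr1, r0, 0" pair latches "this exit arrived through HSRR0/1" into cr1, and at exit the host MSR and handler address are additionally loaded into HSRR1/HSRR0 when that flag is set, replacing the removed mtctr/beqctr path. The struct and functions below are illustrative stand-ins, not kernel API.

#include <stdint.h>
#include <stdbool.h>

struct regs {
	uint64_t srr0, srr1;	/* ordinary save/restore registers */
	uint64_t hsrr0, hsrr1;	/* hypervisor variants (HV-capable CPUs) */
};

/* Entry side: exits delivered through HSRR0/1 are flagged by bit 0x2
 * of the exit number in r12, which is what the andi./cmpwi tests. */
static bool exit_came_via_hsrr(uint32_t exit_nr)
{
	return (exit_nr & 0x2) != 0;
}

/* Exit side: SRR0/1 are loaded as before; HSRR0/1 are loaded as well
 * when the flag recorded in cr1 says the interrupt used them. */
static void restore_host_entry_regs(struct regs *r, bool via_hsrr,
				    uint64_t host_msr, uint64_t handler)
{
	if (via_hsrr) {
		r->hsrr1 = host_msr;
		r->hsrr0 = handler;
	}
	r->srr1 = host_msr;
	r->srr0 = handler;
}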