KVM: arm/arm64: Cleanup MMIO handling
Our MMIO handling is a bit odd, in the sense that it uses an intermediate per-vcpu structure to store the decoded information that describes the access. But the same information is readily available in the HSR/ESR_EL2 field, and we actually use this field to populate the structure.

Let's simplify the whole thing by getting rid of the superfluous structure and save a (tiny) bit of space in the vcpu structure.

[32bit fix courtesy of Olof Johansson <olof@lixom.net>]
Signed-off-by: Marc Zyngier <maz@kernel.org>
commit 0e20f5e255
parent 31a9b0b11b
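The cleanup hinges on the fact that, for a data abort with a valid syndrome (ISV set), everything the removed struct kvm_decode cached is already encoded in the ISS portion of HSR/ESR_EL2: access size (SAS), sign extension (SSE), transfer register (SRT), register width (SF) and direction (WnR). The patched code reads these on demand through kvm_vcpu_dabt_get_as(), kvm_vcpu_dabt_issext(), kvm_vcpu_dabt_get_rd(), kvm_vcpu_dabt_issf() and kvm_vcpu_dabt_iswrite(). The standalone sketch below illustrates that decoding outside the kernel; decode_dabt_iss(), struct dabt_info and the example ESR value are invented for the illustration and are not part of the kernel API.

/*
 * Minimal sketch: decode the data-abort ISS fields that the removed
 * struct kvm_decode used to cache. Bit positions follow the ARMv8
 * ESR_ELx ISS encoding for data aborts (ISV, SAS, SSE, SRT, SF, WnR).
 * decode_dabt_iss() and struct dabt_info are illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct dabt_info {
        unsigned int len;       /* access size in bytes (1 << SAS) */
        unsigned int rt;        /* transfer register (SRT) */
        bool sign_extend;       /* SSE */
        bool sixty_four;        /* SF: 64-bit destination register */
        bool is_write;          /* WnR */
};

static bool decode_dabt_iss(uint32_t esr, struct dabt_info *info)
{
        uint32_t iss = esr & 0x01ffffff;        /* ISS is ESR[24:0] */

        if (!(iss & (1U << 24)))                /* ISV clear: no valid syndrome */
                return false;

        info->len = 1U << ((iss >> 22) & 0x3);  /* SAS */
        info->sign_extend = iss & (1U << 21);   /* SSE */
        info->rt = (iss >> 16) & 0x1f;          /* SRT */
        info->sixty_four = iss & (1U << 15);    /* SF */
        info->is_write = iss & (1U << 6);       /* WnR */
        return true;
}

int main(void)
{
        /* Made-up ESR_EL2 value: ISV=1 data abort, 4-byte sign-extended read into x5. */
        uint32_t esr = (0x24U << 26) |  /* EC: data abort from a lower EL */
                       (1U << 24)   |   /* ISV */
                       (0x2U << 22) |   /* SAS: 4-byte access */
                       (1U << 21)   |   /* SSE: sign extend */
                       (5U << 16)   |   /* SRT: register 5 */
                       (1U << 15);      /* SF: 64-bit register */
        struct dabt_info info;

        if (decode_dabt_iss(esr, &info))
                printf("len=%u rt=%u sext=%d sf=%d write=%d\n",
                       info.len, info.rt, info.sign_extend,
                       info.sixty_four, info.is_write);
        return 0;
}

Because the syndrome stays available in the vcpu's fault state for as long as the abort is being handled, caching a decoded copy per vcpu buys nothing, which is why both kvm_mmio.h headers and decode_hsr() disappear in the diff below.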
@@ -9,7 +9,6 @@
 #include <linux/kvm_host.h>
 #include <asm/kvm_asm.h>
-#include <asm/kvm_mmio.h>
 #include <asm/kvm_arm.h>
 #include <asm/cputype.h>
 
@@ -220,7 +219,7 @@ static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu)
 }
 
 /* Get Access Size from a data abort */
-static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
+static inline unsigned int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
 {
         switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
         case 0:
@@ -231,7 +230,7 @@ static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
                 return 4;
         default:
                 kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
-                return -EFAULT;
+                return 4;
         }
 }
 
@@ -14,7 +14,6 @@
 #include <asm/cputype.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
-#include <asm/kvm_mmio.h>
 #include <asm/fpstate.h>
 #include <kvm/arm_arch_timer.h>
 
@@ -202,9 +201,6 @@ struct kvm_vcpu_arch {
         /* Don't run the guest (internal implementation need) */
         bool pause;
 
-        /* IO related fields */
-        struct kvm_decode mmio_decode;
-
         /* Cache some mmu pages needed inside spinlock regions */
         struct kvm_mmu_memory_cache mmu_page_cache;
 
@@ -300,6 +296,14 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 static inline void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
                                      int exception_index) {}
 
+/* MMIO helpers */
+void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
+unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
+
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
+                 phys_addr_t fault_ipa);
+
 static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
                                        unsigned long hyp_stack_ptr,
                                        unsigned long vector_ptr)
@@ -10,6 +10,7 @@
 #include <linux/compiler.h>
 #include <linux/kvm_host.h>
 #include <asm/cp15.h>
+#include <asm/kvm_arm.h>
 #include <asm/vfp.h>
 
 #define __hyp_text __section(.hyp.text) notrace
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#ifndef __ARM_KVM_MMIO_H__
-#define __ARM_KVM_MMIO_H__
-
-#include <linux/kvm_host.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_arm.h>
-
-struct kvm_decode {
-        unsigned long rt;
-        bool sign_extend;
-        /* Not used on 32-bit arm */
-        bool sixty_four;
-};
-
-void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
-unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
-
-int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
-                 phys_addr_t fault_ipa);
-
-#endif  /* __ARM_KVM_MMIO_H__ */
@@ -17,7 +17,6 @@
 #include <asm/esr.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_hyp.h>
-#include <asm/kvm_mmio.h>
 #include <asm/ptrace.h>
 #include <asm/cputype.h>
 #include <asm/virt.h>
@@ -341,7 +340,7 @@ static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
         return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
 }
 
-static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
+static inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
 {
         return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
 }
@@ -24,7 +24,6 @@
 #include <asm/fpsimd.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
-#include <asm/kvm_mmio.h>
 #include <asm/thread_info.h>
 
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -325,9 +324,6 @@ struct kvm_vcpu_arch {
         /* Don't run the guest (internal implementation need) */
         bool pause;
 
-        /* IO related fields */
-        struct kvm_decode mmio_decode;
-
         /* Cache some mmu pages needed inside spinlock regions */
         struct kvm_mmu_memory_cache mmu_page_cache;
 
@@ -491,6 +487,14 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
                        int exception_index);
 
+/* MMIO helpers */
+void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
+unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
+
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
+                 phys_addr_t fault_ipa);
+
 int kvm_perf_init(void);
 int kvm_perf_teardown(void);
 
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#ifndef __ARM64_KVM_MMIO_H__
-#define __ARM64_KVM_MMIO_H__
-
-#include <linux/kvm_host.h>
-#include <asm/kvm_arm.h>
-
-struct kvm_decode {
-        unsigned long rt;
-        bool sign_extend;
-        /* Witdth of the register accessed by the faulting instruction is 64-bits */
-        bool sixty_four;
-};
-
-void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
-unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
-
-int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
-                 phys_addr_t fault_ipa);
-
-#endif  /* __ARM64_KVM_MMIO_H__ */
@@ -5,7 +5,6 @@
  */
 
 #include <linux/kvm_host.h>
-#include <asm/kvm_mmio.h>
 #include <asm/kvm_emulate.h>
 #include <trace/events/kvm.h>
 
@@ -92,26 +91,23 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
         vcpu->mmio_needed = 0;
 
-        if (!run->mmio.is_write) {
-                len = run->mmio.len;
-                if (len > sizeof(unsigned long))
-                        return -EINVAL;
-
+        if (!kvm_vcpu_dabt_iswrite(vcpu)) {
+                len = kvm_vcpu_dabt_get_as(vcpu);
                 data = kvm_mmio_read_buf(run->mmio.data, len);
 
-                if (vcpu->arch.mmio_decode.sign_extend &&
+                if (kvm_vcpu_dabt_issext(vcpu) &&
                     len < sizeof(unsigned long)) {
                         mask = 1U << ((len * 8) - 1);
                         data = (data ^ mask) - mask;
                 }
 
-                if (!vcpu->arch.mmio_decode.sixty_four)
+                if (!kvm_vcpu_dabt_issf(vcpu))
                         data = data & 0xffffffff;
 
                 trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
                                &data);
                 data = vcpu_data_host_to_guest(vcpu, data, len);
-                vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
+                vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data);
         }
 
         /*
@@ -123,36 +119,6 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
         return 0;
 }
 
-static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
-{
-        unsigned long rt;
-        int access_size;
-        bool sign_extend;
-        bool sixty_four;
-
-        if (kvm_vcpu_dabt_iss1tw(vcpu)) {
-                /* page table accesses IO mem: tell guest to fix its TTBR */
-                kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
-                return 1;
-        }
-
-        access_size = kvm_vcpu_dabt_get_as(vcpu);
-        if (unlikely(access_size < 0))
-                return access_size;
-
-        *is_write = kvm_vcpu_dabt_iswrite(vcpu);
-        sign_extend = kvm_vcpu_dabt_issext(vcpu);
-        sixty_four = kvm_vcpu_dabt_issf(vcpu);
-        rt = kvm_vcpu_dabt_get_rd(vcpu);
-
-        *len = access_size;
-        vcpu->arch.mmio_decode.sign_extend = sign_extend;
-        vcpu->arch.mmio_decode.rt = rt;
-        vcpu->arch.mmio_decode.sixty_four = sixty_four;
-
-        return 0;
-}
-
 int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
                  phys_addr_t fault_ipa)
 {
@@ -164,15 +130,10 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
         u8 data_buf[8];
 
         /*
-         * Prepare MMIO operation. First decode the syndrome data we get
-         * from the CPU. Then try if some in-kernel emulation feels
-         * responsible, otherwise let user space do its magic.
+         * No valid syndrome? Ask userspace for help if it has
+         * volunteered to do so, and bail out otherwise.
          */
-        if (kvm_vcpu_dabt_isvalid(vcpu)) {
-                ret = decode_hsr(vcpu, &is_write, &len);
-                if (ret)
-                        return ret;
-        } else {
+        if (!kvm_vcpu_dabt_isvalid(vcpu)) {
                 if (vcpu->kvm->arch.return_nisv_io_abort_to_user) {
                         run->exit_reason = KVM_EXIT_ARM_NISV;
                         run->arm_nisv.esr_iss = kvm_vcpu_dabt_iss_nisv_sanitized(vcpu);
@@ -184,7 +145,20 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
                         return -ENOSYS;
                 }
 
-        rt = vcpu->arch.mmio_decode.rt;
+        /* Page table accesses IO mem: tell guest to fix its TTBR */
+        if (kvm_vcpu_dabt_iss1tw(vcpu)) {
+                kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+                return 1;
+        }
+
+        /*
+         * Prepare MMIO operation. First decode the syndrome data we get
+         * from the CPU. Then try if some in-kernel emulation feels
+         * responsible, otherwise let user space do its magic.
+         */
+        is_write = kvm_vcpu_dabt_iswrite(vcpu);
+        len = kvm_vcpu_dabt_get_as(vcpu);
+        rt = kvm_vcpu_dabt_get_rd(vcpu);
 
         if (is_write) {
                 data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
@@ -14,7 +14,6 @@
 #include <asm/cacheflush.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_mmu.h>
-#include <asm/kvm_mmio.h>
 #include <asm/kvm_ras.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>