ARM: KVM: promote vfp_host pointer to generic host cpu context

We use the vfp_host pointer to store the host VFP context, should
the guest start using VFP itself.

Actually, we can use this pointer in a more generic way to store
CPU specific data, and arm64 is using it to dump the whole host
state before switching to the guest.

Simply rename the vfp_host field to host_cpu_context, and the
corresponding type to kvm_cpu_context_t. No change in functionality.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <cdall@cs.columbia.edu>
This commit is contained in:
Marc Zyngier 2013-04-08 16:47:19 +01:00 committed by Christoffer Dall
parent 17b1e31f92
commit 3de50da690
3 changed files with 20 additions and 18 deletions

View File

@ -87,7 +87,7 @@ struct kvm_vcpu_fault_info {
u32 hyp_pc; /* PC when exception was taken from Hyp mode */ u32 hyp_pc; /* PC when exception was taken from Hyp mode */
}; };
typedef struct vfp_hard_struct kvm_kernel_vfp_t; typedef struct vfp_hard_struct kvm_cpu_context_t;
struct kvm_vcpu_arch { struct kvm_vcpu_arch {
struct kvm_regs regs; struct kvm_regs regs;
@ -105,8 +105,10 @@ struct kvm_vcpu_arch {
struct kvm_vcpu_fault_info fault; struct kvm_vcpu_fault_info fault;
/* Floating point registers (VFP and Advanced SIMD/NEON) */ /* Floating point registers (VFP and Advanced SIMD/NEON) */
kvm_kernel_vfp_t vfp_guest; struct vfp_hard_struct vfp_guest;
kvm_kernel_vfp_t *vfp_host;
/* Host FP context */
kvm_cpu_context_t *host_cpu_context;
/* VGIC state */ /* VGIC state */
struct vgic_cpu vgic_cpu; struct vgic_cpu vgic_cpu;

View File

@ -154,7 +154,7 @@ int main(void)
DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr)); DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr));
DEFINE(VCPU_CP15, offsetof(struct kvm_vcpu, arch.cp15)); DEFINE(VCPU_CP15, offsetof(struct kvm_vcpu, arch.cp15));
DEFINE(VCPU_VFP_GUEST, offsetof(struct kvm_vcpu, arch.vfp_guest)); DEFINE(VCPU_VFP_GUEST, offsetof(struct kvm_vcpu, arch.vfp_guest));
DEFINE(VCPU_VFP_HOST, offsetof(struct kvm_vcpu, arch.vfp_host)); DEFINE(VCPU_VFP_HOST, offsetof(struct kvm_vcpu, arch.host_cpu_context));
DEFINE(VCPU_REGS, offsetof(struct kvm_vcpu, arch.regs)); DEFINE(VCPU_REGS, offsetof(struct kvm_vcpu, arch.regs));
DEFINE(VCPU_USR_REGS, offsetof(struct kvm_vcpu, arch.regs.usr_regs)); DEFINE(VCPU_USR_REGS, offsetof(struct kvm_vcpu, arch.regs.usr_regs));
DEFINE(VCPU_SVC_REGS, offsetof(struct kvm_vcpu, arch.regs.svc_regs)); DEFINE(VCPU_SVC_REGS, offsetof(struct kvm_vcpu, arch.regs.svc_regs));

View File

@ -49,7 +49,7 @@ __asm__(".arch_extension virt");
#endif #endif
static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
static kvm_kernel_vfp_t __percpu *kvm_host_vfp_state; static kvm_cpu_context_t __percpu *kvm_host_cpu_state;
static unsigned long hyp_default_vectors; static unsigned long hyp_default_vectors;
/* Per-CPU variable containing the currently running vcpu. */ /* Per-CPU variable containing the currently running vcpu. */
@ -317,7 +317,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{ {
vcpu->cpu = cpu; vcpu->cpu = cpu;
vcpu->arch.vfp_host = this_cpu_ptr(kvm_host_vfp_state); vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
/* /*
* Check whether this vcpu requires the cache to be flushed on * Check whether this vcpu requires the cache to be flushed on
@ -882,24 +882,24 @@ static int init_hyp_mode(void)
} }
/* /*
* Map the host VFP structures * Map the host CPU structures
*/ */
kvm_host_vfp_state = alloc_percpu(kvm_kernel_vfp_t); kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
if (!kvm_host_vfp_state) { if (!kvm_host_cpu_state) {
err = -ENOMEM; err = -ENOMEM;
kvm_err("Cannot allocate host VFP state\n"); kvm_err("Cannot allocate host CPU state\n");
goto out_free_mappings; goto out_free_mappings;
} }
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
kvm_kernel_vfp_t *vfp; kvm_cpu_context_t *cpu_ctxt;
vfp = per_cpu_ptr(kvm_host_vfp_state, cpu); cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
err = create_hyp_mappings(vfp, vfp + 1); err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1);
if (err) { if (err) {
kvm_err("Cannot map host VFP state: %d\n", err); kvm_err("Cannot map host CPU state: %d\n", err);
goto out_free_vfp; goto out_free_context;
} }
} }
@ -913,7 +913,7 @@ static int init_hyp_mode(void)
*/ */
err = kvm_vgic_hyp_init(); err = kvm_vgic_hyp_init();
if (err) if (err)
goto out_free_vfp; goto out_free_context;
#ifdef CONFIG_KVM_ARM_VGIC #ifdef CONFIG_KVM_ARM_VGIC
vgic_present = true; vgic_present = true;
@ -935,8 +935,8 @@ static int init_hyp_mode(void)
kvm_info("Hyp mode initialized successfully\n"); kvm_info("Hyp mode initialized successfully\n");
return 0; return 0;
out_free_vfp: out_free_context:
free_percpu(kvm_host_vfp_state); free_percpu(kvm_host_cpu_state);
out_free_mappings: out_free_mappings:
free_hyp_pgds(); free_hyp_pgds();
out_free_stack_pages: out_free_stack_pages: