/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <trace/events/kvm.h>

#include "trace.h"
#include "coproc.h"

/******************************************************************************
 * Co-processor emulation
 *****************************************************************************/

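/*
 * The following four handlers cover coprocessor traps that KVM does not
 * emulate: each one simply injects an undefined instruction exception
 * into the guest.
 */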
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	/*
	 * We can get here, if the host has been built without VFPv3 support,
	 * but the guest attempted a floating point operation.
	 */
	kvm_inject_undefined(vcpu);
	return 1;
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/* See note at ARM ARM B1.14.4 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			const struct coproc_params *p,
			const struct coproc_reg *r)
{
	u32 val;
	int cpu;

	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	/* Check is_write first so we never return with preemption disabled. */
	cpu = get_cpu();

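	/*
	 * Set/way operations only affect the cache of the physical CPU we
	 * are running on, so flag all other CPUs in require_dcache_flush;
	 * the current CPU is cleared from the mask and handled directly below.
	 */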
	cpumask_setall(&vcpu->arch.require_dcache_flush);
	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);

	/* If we were already preempted, take the long way around */
	if (cpu != vcpu->arch.last_pcpu) {
		flush_cache_all();
		goto done;
	}

	val = *vcpu_reg(vcpu, p->Rt1);

	switch (p->CRm) {
	case 6:		/* Upgrade DCISW to DCCISW, as per HCR.SWIO */
	case 14:	/* DCCISW */
		asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
		break;

	case 10:	/* DCCSW */
		asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
		break;
	}

done:
	put_cpu();

	return true;
}

/*
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters. Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 */
static bool pm_fake(struct kvm_vcpu *vcpu,
		    const struct coproc_params *p,
		    const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

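/* Every PM register accessor maps onto the common RAZ/WI handler above. */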
#define access_pmcr pm_fake
#define access_pmcntenset pm_fake
#define access_pmcntenclr pm_fake
#define access_pmovsr pm_fake
#define access_pmselr pm_fake
#define access_pmceid0 pm_fake
#define access_pmceid1 pm_fake
#define access_pmccntr pm_fake
#define access_pmxevtyper pm_fake
#define access_pmxevcntr pm_fake
#define access_pmuserenr pm_fake
#define access_pmintenset pm_fake
#define access_pmintenclr pm_fake

/* Architected CP15 registers.
 * Important: Must be sorted ascending by CRn, CRm, Op1, Op2
 */
static const struct coproc_reg cp15_regs[] = {
	/* CSSELR: swapped by interrupt.S. */
	{ CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
			NULL, reset_unknown, c0_CSSELR },

	/* TTBR0/TTBR1: swapped by interrupt.S. */
	{ CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
	{ CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },

	/* TTBCR: swapped by interrupt.S. */
	{ CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_val, c2_TTBCR, 0x00000000 },

	/* DACR: swapped by interrupt.S. */
	{ CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_unknown, c3_DACR },

	/* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
	{ CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_unknown, c5_DFSR },
	{ CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
			NULL, reset_unknown, c5_IFSR },
	{ CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
			NULL, reset_unknown, c5_ADFSR },
	{ CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
			NULL, reset_unknown, c5_AIFSR },

	/* DFAR/IFAR: swapped by interrupt.S. */
	{ CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_unknown, c6_DFAR },
	{ CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_unknown, c6_IFAR },
	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw},
	{ CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
	{ CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
	/*
	 * Dummy performance monitor implementation.
	 */
	{ CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr},

	/* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
	{ CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
			NULL, reset_unknown, c10_PRRR},
	{ CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
			NULL, reset_unknown, c10_NMRR},

	/* VBAR: swapped by interrupt.S. */
	{ CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_val, c12_VBAR, 0x00000000 },

	/* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
	{ CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
			NULL, reset_val, c13_CID, 0x00000000 },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_unknown, c13_TID_URW },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
			NULL, reset_unknown, c13_TID_URO },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
			NULL, reset_unknown, c13_TID_PRIV },
};

/* Target specific emulation tables */
static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
{
	target_tables[table->target] = table;
}

/* Get specific register table for this target. */
static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
{
	struct kvm_coproc_target_table *table;

	table = target_tables[target];
	*num = table->num;
	return table->table;
}

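/* Look up the entry in @table matching all coprocessor parameters, or NULL. */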
static const struct coproc_reg *find_reg(const struct coproc_params *params,
					 const struct coproc_reg table[],
					 unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		const struct coproc_reg *r = &table[i];

		if (params->is_64bit != r->is_64)
			continue;
		if (params->CRn != r->CRn)
			continue;
		if (params->CRm != r->CRm)
			continue;
		if (params->Op1 != r->Op1)
			continue;
		if (params->Op2 != r->Op2)
			continue;

		return r;
	}
	return NULL;
}

static int emulate_cp15(struct kvm_vcpu *vcpu,
			const struct coproc_params *params)
{
	size_t num;
	const struct coproc_reg *table, *r;

	trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn,
				   params->CRm, params->Op2, params->is_write);

	table = get_target_table(vcpu->arch.target, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));

	if (likely(r)) {
		/* If we don't have an accessor, we should never get here! */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
			return 1;
		}
		/* If access function fails, it should complain. */
	} else {
		kvm_err("Unsupported guest CP15 access at: %08x\n",
			*vcpu_pc(vcpu));
		print_cp_instr(params);
	}
	kvm_inject_undefined(vcpu);
	return 1;
}

/**
 * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run: The kvm_run struct
 */
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct coproc_params params;

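	/* Decode the 64-bit coprocessor access (MCRR/MRRC) from the HSR. */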
	params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
	params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
	params.is_write = ((vcpu->arch.hsr & 1) == 0);
	params.is_64bit = true;

	params.Op1 = (vcpu->arch.hsr >> 16) & 0xf;
	params.Op2 = 0;
	params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf;
	params.CRn = 0;

	return emulate_cp15(vcpu, &params);
}

static void reset_coproc_regs(struct kvm_vcpu *vcpu,
			      const struct coproc_reg *table, size_t num)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset)
			table[i].reset(vcpu, &table[i]);
}

/**
 * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run: The kvm_run struct
 */
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct coproc_params params;

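	/* Decode the 32-bit coprocessor access (MCR/MRC) from the HSR. */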
	params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
	params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
	params.is_write = ((vcpu->arch.hsr & 1) == 0);
	params.is_64bit = false;

	params.CRn = (vcpu->arch.hsr >> 10) & 0xf;
	params.Op1 = (vcpu->arch.hsr >> 14) & 0x7;
	params.Op2 = (vcpu->arch.hsr >> 17) & 0x7;
	params.Rt2 = 0;

	return emulate_cp15(vcpu, &params);
}

void kvm_coproc_table_init(void)
{
	unsigned int i;

	/* Make sure tables are unique and in order. */
	for (i = 1; i < ARRAY_SIZE(cp15_regs); i++)
		BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0);
}

/**
 * kvm_reset_coprocs - sets cp15 registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct coproc_reg *table;

	/* Catch someone adding a register without putting in reset entry. */
	memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15));

	/* Generic chip reset first (so target could override). */
	reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));

	table = get_target_table(vcpu->arch.target, &num);
	reset_coproc_regs(vcpu, table, num);

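	/* Any register still holding the 0x42 poison pattern was never reset. */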
	for (num = 1; num < NR_CP15_REGS; num++)
		if (vcpu->arch.cp15[num] == 0x42424242)
			panic("Didn't reset vcpu->arch.cp15[%zi]", num);
}