2012-11-22 02:34:02 +00:00
|
|
|
/*
|
|
|
|
* This file is subject to the terms and conditions of the GNU General Public
|
|
|
|
* License. See the file "COPYING" in the main directory of this archive
|
|
|
|
* for more details.
|
|
|
|
*
|
|
|
|
* KVM/MIPS: MIPS specific KVM APIs
|
|
|
|
*
|
|
|
|
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
|
|
|
|
* Authors: Sanjay Lal <sanjayl@kymasys.com>
|
2014-06-26 19:11:34 +00:00
|
|
|
*/
|
2012-11-22 02:34:02 +00:00
|
|
|
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/err.h>
|
2014-11-18 14:09:12 +00:00
|
|
|
#include <linux/kdebug.h>
|
2012-11-22 02:34:02 +00:00
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/vmalloc.h>
|
|
|
|
#include <linux/fs.h>
|
|
|
|
#include <linux/bootmem.h>
|
KVM: MIPS: Don't leak FPU/DSP to guest
The FPU and DSP are enabled via the CP0 Status CU1 and MX bits by
kvm_mips_set_c0_status() on a guest exit, presumably in case there is
active state that needs saving if pre-emption occurs. However neither of
these bits are cleared again when returning to the guest.
This effectively gives the guest access to the FPU/DSP hardware after
the first guest exit even though it is not aware of its presence,
allowing FP instructions in guest user code to intermittently actually
execute instead of trapping into the guest OS for emulation. It will
then read & manipulate the hardware FP registers which technically
belong to the user process (e.g. QEMU), or are stale from another user
process. It can also crash the guest OS by causing an FP exception, for
which a guest exception handler won't have been registered.
First lets save and disable the FPU (and MSA) state with lose_fpu(1)
before entering the guest. This simplifies the problem, especially for
when guest FPU/MSA support is added in the future, and prevents FR=1 FPU
state being live when the FR bit gets cleared for the guest, which
according to the architecture causes the contents of the FPU and vector
registers to become UNPREDICTABLE.
We can then safely remove the enabling of the FPU in
kvm_mips_set_c0_status(), since there should never be any active FPU or
MSA state to save at pre-emption, which should plug the FPU leak.
DSP state is always live rather than being lazily restored, so for that
it is simpler to just clear the MX bit again when re-entering the guest.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Sanjay Lal <sanjayl@kymasys.com>
Cc: Gleb Natapov <gleb@kernel.org>
Cc: kvm@vger.kernel.org
Cc: linux-mips@linux-mips.org
Cc: <stable@vger.kernel.org> # v3.10+: 044f0f03eca0: MIPS: KVM: Deliver guest interrupts
Cc: <stable@vger.kernel.org> # v3.10+
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2015-02-04 17:06:37 +00:00
|
|
|
#include <asm/fpu.h>
|
2012-11-22 02:34:02 +00:00
|
|
|
#include <asm/page.h>
|
|
|
|
#include <asm/cacheflush.h>
|
|
|
|
#include <asm/mmu_context.h>
|
2015-02-04 10:52:03 +00:00
|
|
|
#include <asm/pgtable.h>
|
2012-11-22 02:34:02 +00:00
|
|
|
|
|
|
|
#include <linux/kvm_host.h>
|
|
|
|
|
2014-06-26 19:11:38 +00:00
|
|
|
#include "interrupt.h"
|
|
|
|
#include "commpage.h"
|
2012-11-22 02:34:02 +00:00
|
|
|
|
|
|
|
#define CREATE_TRACE_POINTS
|
|
|
|
#include "trace.h"
|
|
|
|
|
|
|
|
/* Spacing between vectored-interrupt handler entry points (EI/VI mode) */
#ifndef VECTORSPACING
#define VECTORSPACING 0x100	/* for EI/VI mode */
#endif
|
|
|
|
|
2014-06-26 19:11:34 +00:00
|
|
|
/* Shorthand: offset of a per-VCPU statistic within struct kvm_vcpu */
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)

/*
 * Per-VCPU exit/event counters exported through debugfs
 * (/sys/kernel/debug/kvm). NULL entry terminates the table.
 */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "wait", VCPU_STAT(wait_exits), KVM_STAT_VCPU },
	{ "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
	{ "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
	{ "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
	{ "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
	{ "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
	{ "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
	{ "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
	{ "addrerr_st", VCPU_STAT(addrerr_st_exits), KVM_STAT_VCPU },
	{ "addrerr_ld", VCPU_STAT(addrerr_ld_exits), KVM_STAT_VCPU },
	{ "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU },
	{ "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU },
	{ "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU },
	{ "trap_inst", VCPU_STAT(trap_inst_exits), KVM_STAT_VCPU },
	{ "msa_fpe", VCPU_STAT(msa_fpe_exits), KVM_STAT_VCPU },
	{ "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU },
	{ "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
	{NULL}
};
|
|
|
|
|
|
|
|
/*
 * Reset a VCPU's ASID state: zero the cached guest kernel/user ASIDs on
 * every possible CPU so stale TLB mappings cannot be reused after reset.
 * Always returns 0.
 */
static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	for_each_possible_cpu(i) {
		vcpu->arch.guest_kernel_asid[i] = 0;
		vcpu->arch.guest_user_asid[i] = 0;
	}

	return 0;
}
|
|
|
|
|
2014-06-26 19:11:34 +00:00
|
|
|
/*
|
|
|
|
* XXXKYMA: We are simulatoring a processor that has the WII bit set in
|
|
|
|
* Config7, so we are "runnable" if interrupts are pending
|
2012-11-22 02:34:02 +00:00
|
|
|
*/
|
|
|
|
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return !!(vcpu->arch.pending_exceptions);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* A MIPS VCPU in guest mode can always be kicked out for servicing */
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}
|
|
|
|
|
2014-08-28 13:13:03 +00:00
|
|
|
/* No per-CPU hardware virtualization state to enable on MIPS */
int kvm_arch_hardware_enable(void)
{
	return 0;
}
|
|
|
|
|
|
|
|
/* No global hardware setup is required for MIPS KVM */
int kvm_arch_hardware_setup(void)
{
	return 0;
}
|
|
|
|
|
|
|
|
/* Report this processor as compatible (result written through @rtn) */
void kvm_arch_check_processor_compat(void *rtn)
{
	int *ret = rtn;

	*ret = 0;
}
|
|
|
|
|
|
|
|
/*
 * Reserve one wired TLB entry on the current CPU for mapping the guest
 * commpage, and record its index in kvm->arch.commpage_tlb.
 * Runs per-CPU (see kvm_mips_init_vm_percpu()).
 */
static void kvm_mips_init_tlbs(struct kvm *kvm)
{
	unsigned long wired;

	/*
	 * Add a wired entry to the TLB, it is used to map the commpage to
	 * the Guest kernel
	 */
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	/* Barrier: make the CP0 Wired write visible before using the entry */
	mtc0_tlbw_hazard();
	kvm->arch.commpage_tlb = wired;

	kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
		  kvm->arch.commpage_tlb);
}
|
|
|
|
|
|
|
|
static void kvm_mips_init_vm_percpu(void *arg)
|
|
|
|
{
|
|
|
|
struct kvm *kvm = (struct kvm *)arg;
|
|
|
|
|
|
|
|
kvm_mips_init_tlbs(kvm);
|
|
|
|
kvm_mips_callbacks->vm_init(kvm);
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Architecture hook for VM creation. The first VM instance triggers
 * per-CPU host TLB setup on every CPU; later instances share it.
 * Always returns 0.
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	/* kvm_mips_instance counts live VMs; 1 means we are the first */
	if (atomic_inc_return(&kvm_mips_instance) == 1) {
		kvm_debug("%s: 1st KVM instance, setup host TLB parameters\n",
			  __func__);
		on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
	}

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Release all VCPUs of a VM and the guest physical-page map:
 * drop the pfn references held in guest_pmap, free each VCPU, then
 * clear the vcpus[] array under kvm->lock.
 */
void kvm_mips_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	/* Put the pages we reserved for the guest pmap */
	for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
		if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
			kvm_release_pfn_clean(kvm->arch.guest_pmap[i]);
	}
	kfree(kvm->arch.guest_pmap);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_arch_vcpu_free(vcpu);
	}

	/* Lock held while mutating the shared vcpus[] array */
	mutex_lock(&kvm->lock);

	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	mutex_unlock(&kvm->lock);
}
|
|
|
|
|
|
|
|
/*
 * Per-CPU teardown (invoked via on_each_cpu()): release the wired TLB
 * entry reserved for the commpage and flush the local TLB.
 */
static void kvm_mips_uninit_tlbs(void *arg)
{
	/* Restore wired count */
	write_c0_wired(0);
	/* Barrier: CP0 Wired write must land before the flush below */
	mtc0_tlbw_hazard();
	/* Clear out all the TLBs */
	kvm_local_flush_tlb_all();
}
|
|
|
|
|
|
|
|
/*
 * Architecture hook for VM destruction: free all VCPUs, and when the
 * last VM goes away undo the per-CPU TLB setup done at VM creation.
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_mips_free_vcpus(kvm);

	/* If this is the last instance, restore wired count */
	if (atomic_dec_return(&kvm_mips_instance) == 0) {
		kvm_debug("%s: last KVM instance, restoring TLB parameters\n",
			  __func__);
		on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
	}
}
|
|
|
|
|
2014-06-26 19:11:34 +00:00
|
|
|
long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
|
|
|
|
unsigned long arg)
|
2012-11-22 02:34:02 +00:00
|
|
|
{
|
2013-05-23 16:49:10 +00:00
|
|
|
return -ENOIOCTLCMD;
|
2012-11-22 02:34:02 +00:00
|
|
|
}
|
|
|
|
|
2013-10-07 16:48:00 +00:00
|
|
|
/* No per-memslot architecture state is needed on MIPS */
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
|
|
|
|
|
|
|
|
int kvm_arch_prepare_memory_region(struct kvm *kvm,
|
2014-06-26 19:11:34 +00:00
|
|
|
struct kvm_memory_slot *memslot,
|
2015-05-18 11:59:39 +00:00
|
|
|
const struct kvm_userspace_memory_region *mem,
|
2014-06-26 19:11:34 +00:00
|
|
|
enum kvm_mr_change change)
|
2012-11-22 02:34:02 +00:00
|
|
|
{
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
void kvm_arch_commit_memory_region(struct kvm *kvm,
|
2015-05-18 11:59:39 +00:00
|
|
|
const struct kvm_userspace_memory_region *mem,
|
2014-06-26 19:11:34 +00:00
|
|
|
const struct kvm_memory_slot *old,
|
2015-05-18 11:20:23 +00:00
|
|
|
const struct kvm_memory_slot *new,
|
2014-06-26 19:11:34 +00:00
|
|
|
enum kvm_mr_change change)
|
2012-11-22 02:34:02 +00:00
|
|
|
{
|
|
|
|
unsigned long npages = 0;
|
2014-06-26 19:11:36 +00:00
|
|
|
int i;
|
2012-11-22 02:34:02 +00:00
|
|
|
|
|
|
|
kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
|
|
|
|
__func__, kvm, mem->slot, mem->guest_phys_addr,
|
|
|
|
mem->memory_size, mem->userspace_addr);
|
|
|
|
|
|
|
|
/* Setup Guest PMAP table */
|
|
|
|
if (!kvm->arch.guest_pmap) {
|
|
|
|
if (mem->slot == 0)
|
|
|
|
npages = mem->memory_size >> PAGE_SHIFT;
|
|
|
|
|
|
|
|
if (npages) {
|
|
|
|
kvm->arch.guest_pmap_npages = npages;
|
|
|
|
kvm->arch.guest_pmap =
|
|
|
|
kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);
|
|
|
|
|
|
|
|
if (!kvm->arch.guest_pmap) {
|
2015-12-16 23:49:39 +00:00
|
|
|
kvm_err("Failed to allocate guest PMAP\n");
|
2014-06-26 19:11:36 +00:00
|
|
|
return;
|
2012-11-22 02:34:02 +00:00
|
|
|
}
|
|
|
|
|
2014-05-29 09:16:43 +00:00
|
|
|
kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
|
|
|
|
npages, kvm->arch.guest_pmap);
|
2012-11-22 02:34:02 +00:00
|
|
|
|
|
|
|
/* Now setup the page table */
|
2014-06-26 19:11:34 +00:00
|
|
|
for (i = 0; i < npages; i++)
|
2012-11-22 02:34:02 +00:00
|
|
|
kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Create and initialise a VCPU:
 *  - allocate the kvm_vcpu and generic-init it,
 *  - allocate a guest exception base (gebase) area and copy the host
 *    exception handler stubs into the architectural entry offsets,
 *  - allocate and init the commpage,
 *  - start the guest timer.
 * Returns the new VCPU or an ERR_PTR on failure; errors unwind via the
 * goto-cleanup chain at the bottom.
 */
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	int err, size, offset;
	void *gebase;
	int i;

	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);

	if (!vcpu) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(vcpu, kvm, id);

	if (err)
		goto out_free_cpu;

	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);

	/*
	 * Allocate space for host mode exception handlers that handle
	 * guest mode exits
	 */
	if (cpu_has_veic || cpu_has_vint)
		size = 0x200 + VECTORSPACING * 64;
	else
		size = 0x4000;

	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

	if (!gebase) {
		err = -ENOMEM;
		goto out_uninit_cpu;
	}
	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
		  ALIGN(size, PAGE_SIZE), gebase);

	/* Save new ebase */
	vcpu->arch.guest_ebase = gebase;

	/* Copy L1 Guest Exception handler to correct offset */

	/* TLB Refill, EXL = 0 */
	memcpy(gebase, mips32_exception,
	       mips32_exceptionEnd - mips32_exception);

	/* General Exception Entry point */
	memcpy(gebase + 0x180, mips32_exception,
	       mips32_exceptionEnd - mips32_exception);

	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
	for (i = 0; i < 8; i++) {
		kvm_debug("L1 Vectored handler @ %p\n",
			  gebase + 0x200 + (i * VECTORSPACING));
		memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
		       mips32_exceptionEnd - mips32_exception);
	}

	/* General handler, relocate to unmapped space for sanity's sake */
	offset = 0x2000;
	kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n",
		  gebase + offset,
		  mips32_GuestExceptionEnd - mips32_GuestException);

	memcpy(gebase + offset, mips32_GuestException,
	       mips32_GuestExceptionEnd - mips32_GuestException);

#ifdef MODULE
	/*
	 * When built as a module, also copy the vcpu_run entry code into
	 * gebase and run it from there.
	 */
	offset += mips32_GuestExceptionEnd - mips32_GuestException;
	memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
	       __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
	vcpu->arch.vcpu_run = gebase + offset;
#else
	vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
#endif

	/* Invalidate the icache for these ranges */
	local_flush_icache_range((unsigned long)gebase,
				 (unsigned long)gebase + ALIGN(size, PAGE_SIZE));

	/*
	 * Allocate comm page for guest kernel, a TLB will be reserved for
	 * mapping GVA @ 0xFFFF8000 to this page
	 */
	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);

	if (!vcpu->arch.kseg0_commpage) {
		err = -ENOMEM;
		goto out_free_gebase;
	}

	kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
	kvm_mips_commpage_init(vcpu);

	/* Init */
	vcpu->arch.last_sched_cpu = -1;

	/* Start off the timer */
	kvm_mips_init_count(vcpu);

	return vcpu;

out_free_gebase:
	kfree(gebase);

out_uninit_cpu:
	kvm_vcpu_uninit(vcpu);

out_free_cpu:
	kfree(vcpu);

out:
	return ERR_PTR(err);
}
|
|
|
|
|
|
|
|
/*
 * Free a VCPU: cancel its timer, run generic uninit, dump stats, then
 * release the exception base, commpage, and the VCPU itself.
 */
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	kvm_vcpu_uninit(vcpu);

	kvm_mips_dump_stats(vcpu);

	kfree(vcpu->arch.guest_ebase);
	kfree(vcpu->arch.kseg0_commpage);
	kfree(vcpu);
}
|
|
|
|
|
|
|
|
/* Destruction is identical to freeing on MIPS */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}
|
|
|
|
|
2014-06-26 19:11:34 +00:00
|
|
|
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
|
|
|
|
struct kvm_guest_debug *dbg)
|
2012-11-22 02:34:02 +00:00
|
|
|
{
|
2013-05-23 16:49:10 +00:00
|
|
|
return -ENOIOCTLCMD;
|
2012-11-22 02:34:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * KVM_RUN: enter the guest and run until the next exit.
 * Sequencing matters here: signals are masked, any pending MMIO load is
 * completed, FPU/MSA state is saved & disabled, then interrupts stay
 * off and the hardware page-table walker is stopped for the whole stay
 * in guest mode. Returns the run loop's result code.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r = 0;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	/* Complete a userspace-emulated MMIO load before re-entering */
	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_mips_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	}

	/*
	 * Save and disable host FPU/MSA state so it cannot leak into the
	 * guest (the guest is unaware of the FPU hardware).
	 */
	lose_fpu(1);

	local_irq_disable();
	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu,
				    kvm_read_c0_guest_cause(vcpu->arch.cop0));

	__kvm_guest_enter();

	/* Disable hardware page table walking while in guest */
	htw_stop();

	r = vcpu->arch.vcpu_run(run, vcpu);

	/* Re-enable HTW before enabling interrupts */
	htw_start();

	__kvm_guest_exit();
	local_irq_enable();

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}
|
|
|
|
|
2014-06-26 19:11:34 +00:00
|
|
|
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
|
|
|
|
struct kvm_mips_interrupt *irq)
|
2012-11-22 02:34:02 +00:00
|
|
|
{
|
|
|
|
int intr = (int)irq->irq;
|
|
|
|
struct kvm_vcpu *dvcpu = NULL;
|
|
|
|
|
|
|
|
if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
|
|
|
|
kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
|
|
|
|
(int)intr);
|
|
|
|
|
|
|
|
if (irq->cpu == -1)
|
|
|
|
dvcpu = vcpu;
|
|
|
|
else
|
|
|
|
dvcpu = vcpu->kvm->vcpus[irq->cpu];
|
|
|
|
|
|
|
|
if (intr == 2 || intr == 3 || intr == 4) {
|
|
|
|
kvm_mips_callbacks->queue_io_int(dvcpu, irq);
|
|
|
|
|
|
|
|
} else if (intr == -2 || intr == -3 || intr == -4) {
|
|
|
|
kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
|
|
|
|
} else {
|
|
|
|
kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
|
|
|
|
irq->cpu, irq->irq);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
dvcpu->arch.wait = 0;
|
|
|
|
|
2016-02-19 08:46:39 +00:00
|
|
|
if (swait_active(&dvcpu->wq))
|
|
|
|
swake_up(&dvcpu->wq);
|
2012-11-22 02:34:02 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-06-26 19:11:34 +00:00
|
|
|
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
|
|
|
|
struct kvm_mp_state *mp_state)
|
2012-11-22 02:34:02 +00:00
|
|
|
{
|
2013-05-23 16:49:10 +00:00
|
|
|
return -ENOIOCTLCMD;
|
2012-11-22 02:34:02 +00:00
|
|
|
}
|
|
|
|
|
2014-06-26 19:11:34 +00:00
|
|
|
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
|
|
|
|
struct kvm_mp_state *mp_state)
|
2012-11-22 02:34:02 +00:00
|
|
|
{
|
2013-05-23 16:49:10 +00:00
|
|
|
return -ENOIOCTLCMD;
|
2012-11-22 02:34:02 +00:00
|
|
|
}
|
|
|
|
|
2013-05-23 16:49:09 +00:00
|
|
|
/*
 * Register IDs reported to userspace via KVM_GET_REG_LIST. The order is
 * user-visible, so entries must not be reordered.
 */
static u64 kvm_mips_get_one_regs[] = {
	KVM_REG_MIPS_R0,
	KVM_REG_MIPS_R1,
	KVM_REG_MIPS_R2,
	KVM_REG_MIPS_R3,
	KVM_REG_MIPS_R4,
	KVM_REG_MIPS_R5,
	KVM_REG_MIPS_R6,
	KVM_REG_MIPS_R7,
	KVM_REG_MIPS_R8,
	KVM_REG_MIPS_R9,
	KVM_REG_MIPS_R10,
	KVM_REG_MIPS_R11,
	KVM_REG_MIPS_R12,
	KVM_REG_MIPS_R13,
	KVM_REG_MIPS_R14,
	KVM_REG_MIPS_R15,
	KVM_REG_MIPS_R16,
	KVM_REG_MIPS_R17,
	KVM_REG_MIPS_R18,
	KVM_REG_MIPS_R19,
	KVM_REG_MIPS_R20,
	KVM_REG_MIPS_R21,
	KVM_REG_MIPS_R22,
	KVM_REG_MIPS_R23,
	KVM_REG_MIPS_R24,
	KVM_REG_MIPS_R25,
	KVM_REG_MIPS_R26,
	KVM_REG_MIPS_R27,
	KVM_REG_MIPS_R28,
	KVM_REG_MIPS_R29,
	KVM_REG_MIPS_R30,
	KVM_REG_MIPS_R31,

	KVM_REG_MIPS_HI,
	KVM_REG_MIPS_LO,
	KVM_REG_MIPS_PC,

	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_USERLOCAL,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_PRID,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG4,
	KVM_REG_MIPS_CP0_CONFIG5,
	KVM_REG_MIPS_CP0_CONFIG7,
	KVM_REG_MIPS_CP0_ERROREPC,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};
|
|
|
|
|
|
|
|
/*
 * KVM_GET_ONE_REG backend: read the guest register identified by
 * reg->id into a temporary (v for <=64-bit values, vs[] for 128-bit
 * vector registers) and copy it out to reg->addr in userspace sized by
 * KVM_REG_SIZE_MASK. Returns 0 on success, -EINVAL for unknown or
 * inaccessible registers, -EFAULT on a failed user copy.
 */
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	int ret;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
		break;
	case KVM_REG_MIPS_HI:
		v = (long)vcpu->arch.hi;
		break;
	case KVM_REG_MIPS_LO:
		v = (long)vcpu->arch.lo;
		break;
	case KVM_REG_MIPS_PC:
		v = (long)vcpu->arch.pc;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			v = get_fpr32(&fpu->fpr[idx], 0);
		else
			v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		v = get_fpr64(&fpu->fpr[idx], 0);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.fpu_id;
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = fpu->fcr31;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Can't access MSA registers in FR=0 mode */
		if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 0);
		vs[1] = get_fpr64(&fpu->fpr[idx], 1);
#else
		/* most significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 1);
		vs[1] = get_fpr64(&fpu->fpr[idx], 0);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.msa_id;
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = fpu->msacsr;
		break;

	/* Co-processor 0 registers */
	case KVM_REG_MIPS_CP0_INDEX:
		v = (long)kvm_read_c0_guest_index(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		v = (long)kvm_read_c0_guest_context(cop0);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		v = (long)kvm_read_c0_guest_userlocal(cop0);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		v = (long)kvm_read_c0_guest_pagemask(cop0);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		v = (long)kvm_read_c0_guest_wired(cop0);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		v = (long)kvm_read_c0_guest_hwrena(cop0);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		v = (long)kvm_read_c0_guest_badvaddr(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		v = (long)kvm_read_c0_guest_entryhi(cop0);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		v = (long)kvm_read_c0_guest_compare(cop0);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		v = (long)kvm_read_c0_guest_status(cop0);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		v = (long)kvm_read_c0_guest_cause(cop0);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		v = (long)kvm_read_c0_guest_epc(cop0);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		v = (long)kvm_read_c0_guest_prid(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		v = (long)kvm_read_c0_guest_config(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		v = (long)kvm_read_c0_guest_config1(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		v = (long)kvm_read_c0_guest_config2(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		v = (long)kvm_read_c0_guest_config3(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		v = (long)kvm_read_c0_guest_config4(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		v = (long)kvm_read_c0_guest_config5(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG7:
		v = (long)kvm_read_c0_guest_config7(cop0);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		v = (long)kvm_read_c0_guest_errorepc(cop0);
		break;
	/* registers to be handled specially */
	case KVM_REG_MIPS_CP0_COUNT:
	case KVM_REG_MIPS_COUNT_CTL:
	case KVM_REG_MIPS_COUNT_RESUME:
	case KVM_REG_MIPS_COUNT_HZ:
		/* Timer-related registers are delegated to the backend */
		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		break;
	default:
		return -EINVAL;
	}
	/* Copy the value out to userspace at the requested width */
	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		return put_user(v, uaddr64);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		u32 v32 = (u32)v;

		return put_user(v32, uaddr32);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
	} else {
		return -EINVAL;
	}
}
|
|
|
|
|
|
|
|
static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
|
|
|
|
const struct kvm_one_reg *reg)
|
|
|
|
{
|
|
|
|
struct mips_coproc *cop0 = vcpu->arch.cop0;
|
2014-12-02 15:48:24 +00:00
|
|
|
struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
|
|
|
|
s64 v;
|
2014-12-02 15:48:24 +00:00
|
|
|
s64 vs[2];
|
2014-12-02 15:48:24 +00:00
|
|
|
unsigned int idx;
|
2013-05-23 16:49:09 +00:00
|
|
|
|
2013-06-10 19:33:48 +00:00
|
|
|
if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
|
|
|
|
u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
|
|
|
|
|
|
|
|
if (get_user(v, uaddr64) != 0)
|
|
|
|
return -EFAULT;
|
|
|
|
} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
|
|
|
|
u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
|
|
|
|
s32 v32;
|
|
|
|
|
|
|
|
if (get_user(v32, uaddr32) != 0)
|
|
|
|
return -EFAULT;
|
|
|
|
v = (s64)v32;
|
2014-12-02 15:48:24 +00:00
|
|
|
} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
|
|
|
|
void __user *uaddr = (void __user *)(long)reg->addr;
|
|
|
|
|
2016-02-28 15:35:59 +00:00
|
|
|
return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;
|
2013-06-10 19:33:48 +00:00
|
|
|
} else {
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2013-05-23 16:49:09 +00:00
|
|
|
|
|
|
|
switch (reg->id) {
|
2014-12-02 15:48:24 +00:00
|
|
|
/* General purpose registers */
|
2013-05-23 16:49:09 +00:00
|
|
|
case KVM_REG_MIPS_R0:
|
|
|
|
/* Silently ignore requests to set $0 */
|
|
|
|
break;
|
|
|
|
case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
|
|
|
|
vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
|
|
|
|
break;
|
|
|
|
case KVM_REG_MIPS_HI:
|
|
|
|
vcpu->arch.hi = v;
|
|
|
|
break;
|
|
|
|
case KVM_REG_MIPS_LO:
|
|
|
|
vcpu->arch.lo = v;
|
|
|
|
break;
|
|
|
|
case KVM_REG_MIPS_PC:
|
|
|
|
vcpu->arch.pc = v;
|
|
|
|
break;
|
|
|
|
|
2014-12-02 15:48:24 +00:00
|
|
|
/* Floating point registers */
|
|
|
|
case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
|
|
|
|
if (!kvm_mips_guest_has_fpu(&vcpu->arch))
|
|
|
|
return -EINVAL;
|
|
|
|
idx = reg->id - KVM_REG_MIPS_FPR_32(0);
|
|
|
|
/* Odd singles in top of even double when FR=0 */
|
|
|
|
if (kvm_read_c0_guest_status(cop0) & ST0_FR)
|
|
|
|
set_fpr32(&fpu->fpr[idx], 0, v);
|
|
|
|
else
|
|
|
|
set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
|
|
|
|
break;
|
|
|
|
case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
|
|
|
|
if (!kvm_mips_guest_has_fpu(&vcpu->arch))
|
|
|
|
return -EINVAL;
|
|
|
|
idx = reg->id - KVM_REG_MIPS_FPR_64(0);
|
|
|
|
/* Can't access odd doubles in FR=0 mode */
|
|
|
|
if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
|
|
|
|
return -EINVAL;
|
|
|
|
set_fpr64(&fpu->fpr[idx], 0, v);
|
|
|
|
break;
|
|
|
|
case KVM_REG_MIPS_FCR_IR:
|
|
|
|
if (!kvm_mips_guest_has_fpu(&vcpu->arch))
|
|
|
|
return -EINVAL;
|
|
|
|
/* Read-only */
|
|
|
|
break;
|
|
|
|
case KVM_REG_MIPS_FCR_CSR:
|
|
|
|
if (!kvm_mips_guest_has_fpu(&vcpu->arch))
|
|
|
|
return -EINVAL;
|
|
|
|
fpu->fcr31 = v;
|
|
|
|
break;
|
|
|
|
|
2014-12-02 15:48:24 +00:00
|
|
|
/* MIPS SIMD Architecture (MSA) registers */
|
|
|
|
case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
|
|
|
|
if (!kvm_mips_guest_has_msa(&vcpu->arch))
|
|
|
|
return -EINVAL;
|
|
|
|
idx = reg->id - KVM_REG_MIPS_VEC_128(0);
|
|
|
|
#ifdef CONFIG_CPU_LITTLE_ENDIAN
|
|
|
|
/* least significant byte first */
|
|
|
|
set_fpr64(&fpu->fpr[idx], 0, vs[0]);
|
|
|
|
set_fpr64(&fpu->fpr[idx], 1, vs[1]);
|
|
|
|
#else
|
|
|
|
/* most significant byte first */
|
|
|
|
set_fpr64(&fpu->fpr[idx], 1, vs[0]);
|
|
|
|
set_fpr64(&fpu->fpr[idx], 0, vs[1]);
|
|
|
|
#endif
|
|
|
|
break;
|
|
|
|
case KVM_REG_MIPS_MSA_IR:
|
|
|
|
if (!kvm_mips_guest_has_msa(&vcpu->arch))
|
|
|
|
return -EINVAL;
|
|
|
|
/* Read-only */
|
|
|
|
break;
|
|
|
|
case KVM_REG_MIPS_MSA_CSR:
|
|
|
|
if (!kvm_mips_guest_has_msa(&vcpu->arch))
|
|
|
|
return -EINVAL;
|
|
|
|
fpu->msacsr = v;
|
|
|
|
break;
|
|
|
|
|
2014-12-02 15:48:24 +00:00
|
|
|
/* Co-processor 0 registers */
|
2013-05-23 16:49:09 +00:00
|
|
|
case KVM_REG_MIPS_CP0_INDEX:
|
|
|
|
kvm_write_c0_guest_index(cop0, v);
|
|
|
|
break;
|
|
|
|
case KVM_REG_MIPS_CP0_CONTEXT:
|
|
|
|
kvm_write_c0_guest_context(cop0, v);
|
|
|
|
break;
|
2014-05-29 09:16:30 +00:00
|
|
|
case KVM_REG_MIPS_CP0_USERLOCAL:
|
|
|
|
kvm_write_c0_guest_userlocal(cop0, v);
|
|
|
|
break;
|
2013-05-23 16:49:09 +00:00
|
|
|
case KVM_REG_MIPS_CP0_PAGEMASK:
|
|
|
|
kvm_write_c0_guest_pagemask(cop0, v);
|
|
|
|
break;
|
|
|
|
case KVM_REG_MIPS_CP0_WIRED:
|
|
|
|
kvm_write_c0_guest_wired(cop0, v);
|
|
|
|
break;
|
2014-05-29 09:16:31 +00:00
|
|
|
case KVM_REG_MIPS_CP0_HWRENA:
|
|
|
|
kvm_write_c0_guest_hwrena(cop0, v);
|
|
|
|
break;
|
2013-05-23 16:49:09 +00:00
|
|
|
case KVM_REG_MIPS_CP0_BADVADDR:
|
|
|
|
kvm_write_c0_guest_badvaddr(cop0, v);
|
|
|
|
break;
|
|
|
|
case KVM_REG_MIPS_CP0_ENTRYHI:
|
|
|
|
kvm_write_c0_guest_entryhi(cop0, v);
|
|
|
|
break;
|
|
|
|
case KVM_REG_MIPS_CP0_STATUS:
|
|
|
|
kvm_write_c0_guest_status(cop0, v);
|
|
|
|
break;
|
2014-05-29 09:16:27 +00:00
|
|
|
case KVM_REG_MIPS_CP0_EPC:
|
|
|
|
kvm_write_c0_guest_epc(cop0, v);
|
|
|
|
break;
|
2014-06-26 12:56:52 +00:00
|
|
|
case KVM_REG_MIPS_CP0_PRID:
|
|
|
|
kvm_write_c0_guest_prid(cop0, v);
|
|
|
|
break;
|
2013-05-23 16:49:09 +00:00
|
|
|
case KVM_REG_MIPS_CP0_ERROREPC:
|
|
|
|
kvm_write_c0_guest_errorepc(cop0, v);
|
|
|
|
break;
|
2014-05-29 09:16:29 +00:00
|
|
|
/* registers to be handled specially */
|
|
|
|
case KVM_REG_MIPS_CP0_COUNT:
|
|
|
|
case KVM_REG_MIPS_CP0_COMPARE:
|
2014-05-29 09:16:35 +00:00
|
|
|
case KVM_REG_MIPS_CP0_CAUSE:
|
2014-06-26 14:11:29 +00:00
|
|
|
case KVM_REG_MIPS_CP0_CONFIG:
|
|
|
|
case KVM_REG_MIPS_CP0_CONFIG1:
|
|
|
|
case KVM_REG_MIPS_CP0_CONFIG2:
|
|
|
|
case KVM_REG_MIPS_CP0_CONFIG3:
|
|
|
|
case KVM_REG_MIPS_CP0_CONFIG4:
|
|
|
|
case KVM_REG_MIPS_CP0_CONFIG5:
|
2014-05-29 09:16:37 +00:00
|
|
|
case KVM_REG_MIPS_COUNT_CTL:
|
|
|
|
case KVM_REG_MIPS_COUNT_RESUME:
|
2014-05-29 09:16:38 +00:00
|
|
|
case KVM_REG_MIPS_COUNT_HZ:
|
2014-05-29 09:16:29 +00:00
|
|
|
return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
|
2013-05-23 16:49:09 +00:00
|
|
|
default:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-12-08 23:07:56 +00:00
|
|
|
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
|
|
|
|
struct kvm_enable_cap *cap)
|
|
|
|
{
|
|
|
|
int r = 0;
|
|
|
|
|
|
|
|
if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
|
|
|
|
return -EINVAL;
|
|
|
|
if (cap->flags)
|
|
|
|
return -EINVAL;
|
|
|
|
if (cap->args[0])
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
switch (cap->cap) {
|
|
|
|
case KVM_CAP_MIPS_FPU:
|
|
|
|
vcpu->arch.fpu_enabled = true;
|
|
|
|
break;
|
2014-12-08 23:07:56 +00:00
|
|
|
case KVM_CAP_MIPS_MSA:
|
|
|
|
vcpu->arch.msa_enabled = true;
|
|
|
|
break;
|
2014-12-08 23:07:56 +00:00
|
|
|
default:
|
|
|
|
r = -EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
2014-06-26 19:11:34 +00:00
|
|
|
/*
 * kvm_arch_vcpu_ioctl() - MIPS-specific VCPU ioctl dispatcher.
 *
 * Handles KVM_GET/SET_ONE_REG, KVM_GET_REG_LIST, KVM_NMI, KVM_INTERRUPT
 * and KVM_ENABLE_CAP; everything else is -ENOIOCTLCMD so generic KVM
 * code can take over.  Note the ONE_REG and REG_LIST cases return
 * directly, while the remaining cases fall through to the shared "out"
 * exit path.
 */
long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
			 unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		if (copy_from_user(&reg, argp, sizeof(reg)))
			return -EFAULT;
		if (ioctl == KVM_SET_ONE_REG)
			return kvm_mips_set_reg(vcpu, &reg);
		else
			return kvm_mips_get_reg(vcpu, &reg);
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		u64 __user *reg_dest;
		struct kvm_reg_list reg_list;
		unsigned n;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			return -EFAULT;
		/*
		 * Report the real count back to userspace before checking
		 * whether the caller's buffer (n entries) was big enough.
		 */
		n = reg_list.n;
		reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			return -EFAULT;
		if (n < reg_list.n)
			return -E2BIG;
		reg_dest = user_list->reg;
		if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
				 sizeof(kvm_mips_get_one_regs)))
			return -EFAULT;
		return 0;
	}
	case KVM_NMI:
		/* Treat the NMI as a CPU reset */
		r = kvm_mips_reset_vcpu(vcpu);
		break;
	case KVM_INTERRUPT:
		{
			struct kvm_mips_interrupt irq;

			r = -EFAULT;
			if (copy_from_user(&irq, argp, sizeof(irq)))
				goto out;

			kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
				  irq.irq);

			r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
			break;
		}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOIOCTLCMD;
	}

out:
	return r;
}
|
|
|
|
|
2014-06-26 19:11:34 +00:00
|
|
|
/*
 * Get (and clear) the dirty memory log for a memory slot.
 *
 * Copies the slot's dirty bitmap to userspace via kvm_get_dirty_log(),
 * then (if anything was dirty) resets the bitmap under kvm->slots_lock.
 * NOTE(review): the bitmap is cleared without write-protecting the pages
 * again here — presumably handled elsewhere in this architecture; the
 * page tables are not touched in this function.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	unsigned long ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		slots = kvm_memslots(kvm);
		memslot = id_to_memslot(slots, log->slot);

		/* Guest-physical range covered by this slot (for the log). */
		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
			 ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;

}
|
|
|
|
|
|
|
|
long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
|
|
|
|
{
|
|
|
|
long r;
|
|
|
|
|
|
|
|
switch (ioctl) {
|
|
|
|
default:
|
2013-05-23 16:49:10 +00:00
|
|
|
r = -ENOIOCTLCMD;
|
2012-11-22 02:34:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
|
|
|
int kvm_arch_init(void *opaque)
|
|
|
|
{
|
|
|
|
if (kvm_mips_callbacks) {
|
|
|
|
kvm_err("kvm: module already exists\n");
|
|
|
|
return -EEXIST;
|
|
|
|
}
|
|
|
|
|
2014-06-26 19:11:36 +00:00
|
|
|
return kvm_mips_emulation_init(&kvm_mips_callbacks);
|
2012-11-22 02:34:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Tear down: drop the registered emulation callbacks. */
void kvm_arch_exit(void)
{
	kvm_mips_callbacks = NULL;
}
|
|
|
|
|
2014-06-26 19:11:34 +00:00
|
|
|
/* KVM_GET_SREGS is not supported on MIPS. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}
|
|
|
|
|
2014-06-26 19:11:34 +00:00
|
|
|
/* KVM_SET_SREGS is not supported on MIPS. */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}
|
|
|
|
|
2014-12-04 14:47:07 +00:00
|
|
|
/* No MIPS-specific work is needed after VCPU creation. */
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}
|
|
|
|
|
|
|
|
/* KVM_GET_FPU is not supported; FPU state is accessed via ONE_REG. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}
|
|
|
|
|
|
|
|
/* KVM_SET_FPU is not supported; FPU state is accessed via ONE_REG. */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}
|
|
|
|
|
|
|
|
/* No mmap-able VCPU regions on MIPS: any fault on the VCPU fd is fatal. */
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
|
|
|
|
|
2014-07-14 16:27:35 +00:00
|
|
|
/*
 * kvm_vm_ioctl_check_extension() - Report which KVM capabilities the
 * MIPS implementation supports.
 *
 * Returns a positive value for supported extensions (1, or an
 * extension-specific value such as the coalesced-MMIO page offset),
 * 0 for anything unknown.
 */
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_MIPS_FPU:
		/* We don't handle systems with inconsistent cpu_has_fpu */
		r = !!raw_cpu_has_fpu;
		break;
	case KVM_CAP_MIPS_MSA:
		/*
		 * We don't support MSA vector partitioning yet:
		 * 1) It would require explicit support which can't be tested
		 *    yet due to lack of support in current hardware.
		 * 2) It extends the state that would need to be saved/restored
		 *    by e.g. QEMU for migration.
		 *
		 * When vector partitioning hardware becomes available, support
		 * could be added by requiring a flag when enabling
		 * KVM_CAP_MIPS_MSA capability to indicate that userland knows
		 * to save/restore the appropriate extra state.
		 */
		r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
		break;
	default:
		r = 0;
		break;
	}
	return r;
}
|
|
|
|
|
|
|
|
/* Generic-KVM hook: is a guest timer interrupt pending for this VCPU? */
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_mips_pending_timer(vcpu);
}
|
|
|
|
|
|
|
|
/*
 * kvm_arch_vcpu_dump_regs() - Log the VCPU register state (kvm_debug).
 *
 * Dumps pc, pending exceptions, all 32 GPRs (four per line), hi/lo, and
 * the guest CP0 Status/Cause/EPC.  Used from the unhandled-exit path.
 *
 * Return: 0 on success, -1 if @vcpu is NULL.
 */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;
	struct mips_coproc *cop0;

	if (!vcpu)
		return -1;

	kvm_debug("VCPU Register Dump:\n");
	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
			  vcpu->arch.gprs[i],
			  vcpu->arch.gprs[i + 1],
			  vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}
	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = vcpu->arch.cop0;
	kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
		  kvm_read_c0_guest_status(cop0),
		  kvm_read_c0_guest_cause(cop0));

	kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

	return 0;
}
|
|
|
|
|
|
|
|
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
2013-05-23 16:49:08 +00:00
|
|
|
for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
|
2013-05-23 16:49:07 +00:00
|
|
|
vcpu->arch.gprs[i] = regs->gpr[i];
|
2013-05-23 16:49:08 +00:00
|
|
|
vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
|
2012-11-22 02:34:02 +00:00
|
|
|
vcpu->arch.hi = regs->hi;
|
|
|
|
vcpu->arch.lo = regs->lo;
|
|
|
|
vcpu->arch.pc = regs->pc;
|
|
|
|
|
2013-05-23 16:49:09 +00:00
|
|
|
return 0;
|
2012-11-22 02:34:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
2013-05-23 16:49:08 +00:00
|
|
|
for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
|
2013-05-23 16:49:07 +00:00
|
|
|
regs->gpr[i] = vcpu->arch.gprs[i];
|
2012-11-22 02:34:02 +00:00
|
|
|
|
|
|
|
regs->hi = vcpu->arch.hi;
|
|
|
|
regs->lo = vcpu->arch.lo;
|
|
|
|
regs->pc = vcpu->arch.pc;
|
|
|
|
|
2013-05-23 16:49:09 +00:00
|
|
|
return 0;
|
2012-11-22 02:34:02 +00:00
|
|
|
}
|
|
|
|
|
2014-05-29 09:16:39 +00:00
|
|
|
/*
 * Timer callback body: queue a guest timer interrupt and wake the VCPU
 * if it is blocked in guest WAIT.  @data is the kvm_vcpu pointer cast to
 * unsigned long (timer-callback calling convention).
 */
static void kvm_mips_comparecount_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvm_mips_callbacks->queue_timer_int(vcpu);

	vcpu->arch.wait = 0;
	if (swait_active(&vcpu->wq))
		swake_up(&vcpu->wq);
}
|
|
|
|
|
2014-06-26 19:11:34 +00:00
|
|
|
/*
 * low level hrtimer wake routine.
 *
 * Fires when the emulated CP0 Count/Compare timer expires: injects the
 * timer interrupt via kvm_mips_comparecount_func() and lets
 * kvm_mips_count_timeout() decide whether to restart the hrtimer.
 */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
	kvm_mips_comparecount_func((unsigned long) vcpu);
	return kvm_mips_count_timeout(vcpu);
}
|
|
|
|
|
|
|
|
/*
 * Per-VCPU init: implementation hook first, then set up the hrtimer that
 * emulates the CP0 Count/Compare timer interrupt.
 */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	kvm_mips_callbacks->vcpu_init(vcpu);
	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
	return 0;
}
|
|
|
|
|
2014-06-26 19:11:34 +00:00
|
|
|
/* KVM_TRANSLATE: nothing to do on MIPS; report success without filling @tr. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}
|
|
|
|
|
|
|
|
/* Initial guest state: delegated to the emulation implementation. */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return kvm_mips_callbacks->vcpu_setup(vcpu);
}
|
|
|
|
|
2014-06-26 19:11:34 +00:00
|
|
|
/*
 * Enable host CP0 Status bits needed after a guest exit.  Currently only
 * sets MX when the host has DSP, so DSP state can be used/saved; the
 * ehb() ensures the Status write has taken effect before continuing.
 */
static void kvm_mips_set_c0_status(void)
{
	u32 status = read_c0_status();

	if (cpu_has_dsp)
		status |= (ST0_MX);

	write_c0_status(status);
	ehb();
}
|
|
|
|
|
|
|
|
/*
 * kvm_mips_handle_exit() - Main guest-exit handler.
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 *
 * Called with interrupts disabled after an exit from the guest.  Runs a
 * privilege check, dispatches on the exception code to the per-exception
 * implementation callbacks, handles pending signals, and restores the
 * guest FCSR/MSACSR before re-entry when the corresponding context is
 * live.  Interrupts are re-enabled for the body and disabled again
 * before returning; HTW is enabled on entry and disabled on exit.
 */
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	/* re-enable HTW before enabling interrupts */
	htw_start();

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/*
	 * Set the appropriate status bits based on host CPU features,
	 * before we hit the scheduler
	 */
	kvm_mips_set_c0_status();

	local_irq_enable();

	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
			cause, opc, run, vcpu);

	/*
	 * Do a privilege check, if in UM most of these exit conditions end up
	 * causing an exception to be delivered to the Guest Kernel
	 */
	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
	if (er == EMULATE_PRIV_FAIL) {
		goto skip_emul;
	} else if (er == EMULATE_FAIL) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		goto skip_emul;
	}

	switch (exccode) {
	case EXCCODE_INT:
		kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);

		++vcpu->stat.int_exits;
		trace_kvm_exit(vcpu, INT_EXITS);

		if (need_resched())
			cond_resched();

		ret = RESUME_GUEST;
		break;

	case EXCCODE_CPU:
		kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);

		++vcpu->stat.cop_unusable_exits;
		trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
		/* XXXKYMA: Might need to return to user space */
		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
			ret = RESUME_HOST;
		break;

	case EXCCODE_MOD:
		++vcpu->stat.tlbmod_exits;
		trace_kvm_exit(vcpu, TLBMOD_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
		break;

	case EXCCODE_TLBS:
		kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
			  badvaddr);

		++vcpu->stat.tlbmiss_st_exits;
		trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
		break;

	case EXCCODE_TLBL:
		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);

		++vcpu->stat.tlbmiss_ld_exits;
		trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
		break;

	case EXCCODE_ADES:
		++vcpu->stat.addrerr_st_exits;
		trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
		break;

	case EXCCODE_ADEL:
		++vcpu->stat.addrerr_ld_exits;
		trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
		break;

	case EXCCODE_SYS:
		++vcpu->stat.syscall_exits;
		trace_kvm_exit(vcpu, SYSCALL_EXITS);
		ret = kvm_mips_callbacks->handle_syscall(vcpu);
		break;

	case EXCCODE_RI:
		++vcpu->stat.resvd_inst_exits;
		trace_kvm_exit(vcpu, RESVD_INST_EXITS);
		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
		break;

	case EXCCODE_BP:
		++vcpu->stat.break_inst_exits;
		trace_kvm_exit(vcpu, BREAK_INST_EXITS);
		ret = kvm_mips_callbacks->handle_break(vcpu);
		break;

	case EXCCODE_TR:
		++vcpu->stat.trap_inst_exits;
		trace_kvm_exit(vcpu, TRAP_INST_EXITS);
		ret = kvm_mips_callbacks->handle_trap(vcpu);
		break;

	case EXCCODE_MSAFPE:
		++vcpu->stat.msa_fpe_exits;
		trace_kvm_exit(vcpu, MSA_FPE_EXITS);
		ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
		break;

	case EXCCODE_FPE:
		++vcpu->stat.fpe_exits;
		trace_kvm_exit(vcpu, FPE_EXITS);
		ret = kvm_mips_callbacks->handle_fpe(vcpu);
		break;

	case EXCCODE_MSADIS:
		++vcpu->stat.msa_disabled_exits;
		trace_kvm_exit(vcpu, MSA_DISABLED_EXITS);
		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
		break;

	default:
		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
			exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
			kvm_read_c0_guest_status(vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	}

skip_emul:
	local_irq_disable();

	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

	if (!(ret & RESUME_HOST)) {
		/* Only check for signals if not already exiting to userspace */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			ret = (-EINTR << 2) | RESUME_HOST;
			++vcpu->stat.signal_exits;
			trace_kvm_exit(vcpu, SIGNAL_EXITS);
		}
	}

	if (ret == RESUME_GUEST) {
		/*
		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
		 * is live), restore FCR31 / MSACSR.
		 *
		 * This should be before returning to the guest exception
		 * vector, as it may well cause an [MSA] FP exception if there
		 * are pending exception bits unmasked. (see
		 * kvm_mips_csr_die_notifier() for how that is handled).
		 */
		if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
		    read_c0_status() & ST0_CU1)
			__kvm_restore_fcsr(&vcpu->arch);

		if (kvm_mips_guest_has_msa(&vcpu->arch) &&
		    read_c0_config5() & MIPS_CONF5_MSAEN)
			__kvm_restore_msacsr(&vcpu->arch);
	}

	/* Disable HTW before returning to guest or host */
	htw_stop();

	return ret;
}
|
|
|
|
|
2014-11-18 14:09:12 +00:00
|
|
|
/*
 * Enable FPU for guest and restore context.
 *
 * Runs with preemption disabled so the hardware FPU cannot be taken
 * away between enabling it and marking the guest context live in
 * vcpu->arch.aux_inuse.
 */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	sr = kvm_read_c0_guest_status(cop0);

	/*
	 * If MSA state is already live, it is undefined how it interacts with
	 * FR=0 FPU state, and we don't want to hit reserved instruction
	 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
	 * play it safe and save it first.
	 *
	 * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
	 * get called when guest CU1 is set, however we can't trust the guest
	 * not to clobber the status register directly via the commpage.
	 */
	if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
		kvm_lose_fpu(vcpu);

	/*
	 * Enable FPU for guest
	 * We set FR and FRE according to guest context
	 */
	change_c0_status(ST0_CU1 | ST0_FR, sr);
	if (cpu_has_fre) {
		cfg5 = kvm_read_c0_guest_config5(cop0);
		change_c0_config5(MIPS_CONF5_FRE, cfg5);
	}
	enable_fpu_hazard();

	/* If guest FPU state not active, restore it now */
	if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
		__kvm_restore_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
	} else {
		/* State was already live; we only had to re-enable CU1. */
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
	}

	preempt_enable();
}
|
|
|
|
|
2015-03-05 11:43:36 +00:00
|
|
|
#ifdef CONFIG_CPU_HAS_MSA
|
|
|
|
/* Enable MSA for guest and restore context */
|
|
|
|
void kvm_own_msa(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
struct mips_coproc *cop0 = vcpu->arch.cop0;
|
|
|
|
unsigned int sr, cfg5;
|
|
|
|
|
|
|
|
preempt_disable();
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Enable FPU if enabled in guest, since we're restoring FPU context
|
|
|
|
* anyway. We set FR and FRE according to guest context.
|
|
|
|
*/
|
|
|
|
if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
|
|
|
|
sr = kvm_read_c0_guest_status(cop0);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If FR=0 FPU state is already live, it is undefined how it
|
|
|
|
* interacts with MSA state, so play it safe and save it first.
|
|
|
|
*/
|
|
|
|
if (!(sr & ST0_FR) &&
|
2016-06-14 08:40:10 +00:00
|
|
|
(vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
|
|
|
|
KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)
|
2015-03-05 11:43:36 +00:00
|
|
|
kvm_lose_fpu(vcpu);
|
|
|
|
|
|
|
|
change_c0_status(ST0_CU1 | ST0_FR, sr);
|
|
|
|
if (sr & ST0_CU1 && cpu_has_fre) {
|
|
|
|
cfg5 = kvm_read_c0_guest_config5(cop0);
|
|
|
|
change_c0_config5(MIPS_CONF5_FRE, cfg5);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Enable MSA for guest */
|
|
|
|
set_c0_config5(MIPS_CONF5_MSAEN);
|
|
|
|
enable_fpu_hazard();
|
|
|
|
|
2016-06-14 08:40:10 +00:00
|
|
|
switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
|
|
|
|
case KVM_MIPS_AUX_FPU:
|
2015-03-05 11:43:36 +00:00
|
|
|
/*
|
|
|
|
* Guest FPU state already loaded, only restore upper MSA state
|
|
|
|
*/
|
|
|
|
__kvm_restore_msa_upper(&vcpu->arch);
|
2016-06-14 08:40:10 +00:00
|
|
|
vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
|
2016-06-14 08:40:11 +00:00
|
|
|
trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
|
2015-03-05 11:43:36 +00:00
|
|
|
break;
|
|
|
|
case 0:
|
|
|
|
/* Neither FPU or MSA already active, restore full MSA state */
|
|
|
|
__kvm_restore_msa(&vcpu->arch);
|
2016-06-14 08:40:10 +00:00
|
|
|
vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
|
2015-03-05 11:43:36 +00:00
|
|
|
if (kvm_mips_guest_has_fpu(&vcpu->arch))
|
2016-06-14 08:40:10 +00:00
|
|
|
vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
|
2016-06-14 08:40:11 +00:00
|
|
|
trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
|
|
|
|
KVM_TRACE_AUX_FPU_MSA);
|
2015-03-05 11:43:36 +00:00
|
|
|
break;
|
|
|
|
default:
|
2016-06-14 08:40:11 +00:00
|
|
|
trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
|
2015-03-05 11:43:36 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
preempt_enable();
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
 * Drop FPU & MSA without saving it.
 *
 * Disables the hardware units and clears the aux_inuse bits, discarding
 * any live guest FPU/MSA register state.
 */
void kvm_drop_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		disable_msa();
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
	}
	if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		clear_c0_status(ST0_CU1 | ST0_FR);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
	}
	preempt_enable();
}
|
|
|
|
|
2015-03-05 11:43:36 +00:00
|
|
|
/*
 * kvm_lose_fpu() - Save guest FPU & MSA state and disable the hardware.
 * @vcpu:	Virtual CPU whose live FPU/MSA context is written back to
 *		vcpu->arch before the units are disabled.
 *
 * Unlike kvm_drop_fpu(), the live register contents are preserved so they
 * can be restored later. Clears the aux_inuse flags for whatever was saved.
 */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
	/*
	 * FPU & MSA get disabled in root context (hardware) when it is disabled
	 * in guest context (software), but the register state in the hardware
	 * may still be in use. This is why we explicitly re-enable the hardware
	 * before saving.
	 */

	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		/* Re-enable MSA so the save helper can read the registers */
		set_c0_config5(MIPS_CONF5_MSAEN);
		enable_fpu_hazard();

		/* Saves the full MSA vector state (includes the FPU state) */
		__kvm_save_msa(&vcpu->arch);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);

		/* Disable MSA & FPU */
		disable_msa();
		if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
			clear_c0_status(ST0_CU1 | ST0_FR);
			disable_fpu_hazard();
		}
		vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
	} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		/* Only scalar FPU state is live: re-enable CU1, then save */
		set_c0_status(ST0_CU1);
		enable_fpu_hazard();

		__kvm_save_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

		/* Disable FPU */
		clear_c0_status(ST0_CU1 | ST0_FR);
		disable_fpu_hazard();
	}
	preempt_enable();
}
|
|
|
|
|
|
|
|
/*
|
2015-03-05 11:43:36 +00:00
|
|
|
* Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
|
|
|
|
* used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
|
|
|
|
* exception if cause bits are set in the value being written.
|
2014-11-18 14:09:12 +00:00
|
|
|
*/
|
|
|
|
static int kvm_mips_csr_die_notify(struct notifier_block *self,
|
|
|
|
unsigned long cmd, void *ptr)
|
|
|
|
{
|
|
|
|
struct die_args *args = (struct die_args *)ptr;
|
|
|
|
struct pt_regs *regs = args->regs;
|
|
|
|
unsigned long pc;
|
|
|
|
|
2015-03-05 11:43:36 +00:00
|
|
|
/* Only interested in FPE and MSAFPE */
|
|
|
|
if (cmd != DIE_FP && cmd != DIE_MSAFP)
|
2014-11-18 14:09:12 +00:00
|
|
|
return NOTIFY_DONE;
|
|
|
|
|
|
|
|
/* Return immediately if guest context isn't active */
|
|
|
|
if (!(current->flags & PF_VCPU))
|
|
|
|
return NOTIFY_DONE;
|
|
|
|
|
|
|
|
/* Should never get here from user mode */
|
|
|
|
BUG_ON(user_mode(regs));
|
|
|
|
|
|
|
|
pc = instruction_pointer(regs);
|
|
|
|
switch (cmd) {
|
|
|
|
case DIE_FP:
|
|
|
|
/* match 2nd instruction in __kvm_restore_fcsr */
|
|
|
|
if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
|
|
|
|
return NOTIFY_DONE;
|
|
|
|
break;
|
2015-03-05 11:43:36 +00:00
|
|
|
case DIE_MSAFP:
|
|
|
|
/* match 2nd/3rd instruction in __kvm_restore_msacsr */
|
|
|
|
if (!cpu_has_msa ||
|
|
|
|
pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
|
|
|
|
pc > (unsigned long)&__kvm_restore_msacsr + 8)
|
|
|
|
return NOTIFY_DONE;
|
|
|
|
break;
|
2014-11-18 14:09:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Move PC forward a little and continue executing */
|
|
|
|
instruction_pointer(regs) += 4;
|
|
|
|
|
|
|
|
return NOTIFY_STOP;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Die notifier that steps over faulting guest FCSR/MSACSR restore writes */
static struct notifier_block kvm_mips_csr_die_notifier = {
	.notifier_call = kvm_mips_csr_die_notify,
};
|
|
|
|
|
2015-12-16 23:49:32 +00:00
|
|
|
/*
 * kvm_mips_init() - Module init: bring up the KVM core and install the die
 * notifier used to step over guest FCSR/MSACSR restore exceptions.
 *
 * Return: 0 on success, negative errno from kvm_init() on failure.
 */
static int __init kvm_mips_init(void)
{
	int ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	if (ret)
		return ret;

	/* Catch FP/MSAFP exceptions raised while restoring guest CSRs */
	register_die_notifier(&kvm_mips_csr_die_notifier);

	return 0;
}
|
|
|
|
|
2015-12-16 23:49:32 +00:00
|
|
|
/*
 * kvm_mips_exit() - Module teardown: shut down the KVM core, then remove
 * the guest-CSR die notifier registered at init time.
 */
static void __exit kvm_mips_exit(void)
{
	kvm_exit();

	unregister_die_notifier(&kvm_mips_csr_die_notifier);
}
|
|
|
|
|
|
|
|
/* Module entry/exit points */
module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

/* Export the kvm_exit tracepoint so it is usable outside this object */
EXPORT_TRACEPOINT_SYMBOL(kvm_exit);
|