// SPDX-License-Identifier: GPL-2.0
/*
 * kvm guest debug support
 *
 * Copyright IBM Corp. 2014
 *
 * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
 */
#include <linux/kvm_host.h>
#include <linux/errno.h>
#include "kvm-s390.h"
#include "gaccess.h"

/*
 * Extends the address range given by *start and *stop to include the address
 * range starting with estart and the length len. Takes care of overflowing
 * intervals and tries to minimize the overall interval size.
 */
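/*
 * Worked example (editor's illustration): a wrapped interval
 * *start = 0xf000, *stop = 0x00ff extended by estart = 0x0100, len = 0x10
 * hits the "minimize the range" case below. Growing the wrapped end costs
 * 0x10 addresses while moving the start would cost 0xef00, so the result
 * is *start = 0xf000, *stop = 0x010f.
 */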
static void extend_address_range(u64 *start, u64 *stop, u64 estart, int len)
{
	u64 estop;

	if (len > 0)
		len--;
	else
		len = 0;

	estop = estart + len;

	/* 0-0 range represents "not set" */
	if ((*start == 0) && (*stop == 0)) {
		*start = estart;
		*stop = estop;
	} else if (*start <= *stop) {
		/* increase the existing range */
		if (estart < *start)
			*start = estart;
		if (estop > *stop)
			*stop = estop;
	} else {
		/* "overflowing" interval, whereby *start > *stop */
		if (estart <= *stop) {
			if (estop > *stop)
				*stop = estop;
		} else if (estop > *start) {
			if (estart < *start)
				*start = estart;
		}
		/* minimize the range */
		else if ((estop - *stop) < (*start - estart))
			*stop = estop;
		else
			*start = estart;
	}
}
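
/* s390 instructions are 2, 4 or 6 bytes long, hence this maximum */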
#define MAX_INST_SIZE 6

static void enable_all_hw_bp(struct kvm_vcpu *vcpu)
{
	unsigned long start, len;
	u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
	u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
	u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
	int i;

	if (vcpu->arch.guestdbg.nr_hw_bp <= 0 ||
	    vcpu->arch.guestdbg.hw_bp_info == NULL)
		return;

	/*
	 * If the guest is not interested in branching events, we can safely
	 * limit them to the PER address range.
	 */
	if (!(*cr9 & PER_EVENT_BRANCH))
		*cr9 |= PER_CONTROL_BRANCH_ADDRESS;
	*cr9 |= PER_EVENT_IFETCH | PER_EVENT_BRANCH;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
		start = vcpu->arch.guestdbg.hw_bp_info[i].addr;
		len = vcpu->arch.guestdbg.hw_bp_info[i].len;

		/*
		 * The instruction in front of the desired bp has to
		 * report instruction-fetching events
		 */
		if (start < MAX_INST_SIZE) {
			len += start;
			start = 0;
		} else {
			start -= MAX_INST_SIZE;
			len += MAX_INST_SIZE;
		}

		extend_address_range(cr10, cr11, start, len);
	}
}

static void enable_all_hw_wp(struct kvm_vcpu *vcpu)
{
	unsigned long start, len;
	u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
	u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
	u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
	int i;

	if (vcpu->arch.guestdbg.nr_hw_wp <= 0 ||
	    vcpu->arch.guestdbg.hw_wp_info == NULL)
		return;

	/* if host uses storage alteration for special address
	 * spaces, enable all events and give all to the guest */
	if (*cr9 & PER_EVENT_STORE && *cr9 & PER_CONTROL_ALTERATION) {
		*cr9 &= ~PER_CONTROL_ALTERATION;
		*cr10 = 0;
		*cr11 = -1UL;
	} else {
		*cr9 &= ~PER_CONTROL_ALTERATION;
		*cr9 |= PER_EVENT_STORE;

		for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
			start = vcpu->arch.guestdbg.hw_wp_info[i].addr;
			len = vcpu->arch.guestdbg.hw_wp_info[i].len;

			extend_address_range(cr10, cr11, start, len);
		}
	}
}

void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu)
{
	vcpu->arch.guestdbg.cr0 = vcpu->arch.sie_block->gcr[0];
	vcpu->arch.guestdbg.cr9 = vcpu->arch.sie_block->gcr[9];
	vcpu->arch.guestdbg.cr10 = vcpu->arch.sie_block->gcr[10];
	vcpu->arch.guestdbg.cr11 = vcpu->arch.sie_block->gcr[11];
}

void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->gcr[0] = vcpu->arch.guestdbg.cr0;
	vcpu->arch.sie_block->gcr[9] = vcpu->arch.guestdbg.cr9;
	vcpu->arch.sie_block->gcr[10] = vcpu->arch.guestdbg.cr10;
	vcpu->arch.sie_block->gcr[11] = vcpu->arch.guestdbg.cr11;
}

void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu)
{
	/*
	 * TODO: only patch in the guest's PER settings if the guest PSW has
	 * PER enabled, otherwise use 0s! This would reduce the amount of
	 * reported events. We would need to intercept all PSW changes!
	 */

	if (guestdbg_sstep_enabled(vcpu)) {
		/* disable timer (clock-comparator) interrupts */
		vcpu->arch.sie_block->gcr[0] &= ~CR0_CLOCK_COMPARATOR_SUBMASK;
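		/*
		 * Single-stepping: report an i-fetch event for every
		 * instruction by covering the whole address space
		 * (cr10 = 0, cr11 = -1UL).
		 */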
		vcpu->arch.sie_block->gcr[9] |= PER_EVENT_IFETCH;
		vcpu->arch.sie_block->gcr[10] = 0;
		vcpu->arch.sie_block->gcr[11] = -1UL;
	}

	if (guestdbg_hw_bp_enabled(vcpu)) {
		enable_all_hw_bp(vcpu);
		enable_all_hw_wp(vcpu);
	}

	/* TODO: Instruction-fetching-nullification not allowed for now */
	if (vcpu->arch.sie_block->gcr[9] & PER_EVENT_NULLIFICATION)
		vcpu->arch.sie_block->gcr[9] &= ~PER_EVENT_NULLIFICATION;
}

#define MAX_WP_SIZE 100

static int __import_wp_info(struct kvm_vcpu *vcpu,
			    struct kvm_hw_breakpoint *bp_data,
			    struct kvm_hw_wp_info_arch *wp_info)
{
	int ret = 0;

	wp_info->len = bp_data->len;
	wp_info->addr = bp_data->addr;
	wp_info->phys_addr = bp_data->phys_addr;
	wp_info->old_data = NULL;

	if (wp_info->len < 0 || wp_info->len > MAX_WP_SIZE)
		return -EINVAL;

	wp_info->old_data = kmalloc(bp_data->len, GFP_KERNEL_ACCOUNT);
	if (!wp_info->old_data)
		return -ENOMEM;
	/* try to backup the original value */
	ret = read_guest_abs(vcpu, wp_info->phys_addr, wp_info->old_data,
			     wp_info->len);
	if (ret) {
		kfree(wp_info->old_data);
		wp_info->old_data = NULL;
	}

	return ret;
}

#define MAX_BP_COUNT 50

int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
			    struct kvm_guest_debug *dbg)
{
	int ret = 0, nr_wp = 0, nr_bp = 0, i;
	struct kvm_hw_breakpoint *bp_data = NULL;
	struct kvm_hw_wp_info_arch *wp_info = NULL;
	struct kvm_hw_bp_info_arch *bp_info = NULL;

	if (dbg->arch.nr_hw_bp <= 0 || !dbg->arch.hw_bp)
		return 0;
	else if (dbg->arch.nr_hw_bp > MAX_BP_COUNT)
		return -EINVAL;

	bp_data = memdup_user(dbg->arch.hw_bp,
			      sizeof(*bp_data) * dbg->arch.nr_hw_bp);
	if (IS_ERR(bp_data))
		return PTR_ERR(bp_data);
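
	/* first pass: count the requested watchpoints and breakpoints */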
	for (i = 0; i < dbg->arch.nr_hw_bp; i++) {
		switch (bp_data[i].type) {
		case KVM_HW_WP_WRITE:
			nr_wp++;
			break;
		case KVM_HW_BP:
			nr_bp++;
			break;
		default:
			break;
		}
	}

	if (nr_wp > 0) {
		wp_info = kmalloc_array(nr_wp,
					sizeof(*wp_info),
					GFP_KERNEL_ACCOUNT);
		if (!wp_info) {
			ret = -ENOMEM;
			goto error;
		}
	}
	if (nr_bp > 0) {
		bp_info = kmalloc_array(nr_bp,
					sizeof(*bp_info),
					GFP_KERNEL_ACCOUNT);
		if (!bp_info) {
			ret = -ENOMEM;
			goto error;
		}
	}
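
	/* second pass: fill the arrays, recounting the entries actually used */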
	for (nr_wp = 0, nr_bp = 0, i = 0; i < dbg->arch.nr_hw_bp; i++) {
		switch (bp_data[i].type) {
		case KVM_HW_WP_WRITE:
			ret = __import_wp_info(vcpu, &bp_data[i],
					       &wp_info[nr_wp]);
			if (ret)
				goto error;
			nr_wp++;
			break;
		case KVM_HW_BP:
			bp_info[nr_bp].len = bp_data[i].len;
			bp_info[nr_bp].addr = bp_data[i].addr;
			nr_bp++;
			break;
		}
	}

	vcpu->arch.guestdbg.nr_hw_bp = nr_bp;
	vcpu->arch.guestdbg.hw_bp_info = bp_info;
	vcpu->arch.guestdbg.nr_hw_wp = nr_wp;
	vcpu->arch.guestdbg.hw_wp_info = wp_info;
	/* the user space copy is no longer needed, free it to avoid a leak */
	kfree(bp_data);
	return 0;
error:
	kfree(bp_data);
	kfree(wp_info);
	kfree(bp_info);
	return ret;
}

void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_hw_wp_info_arch *hw_wp_info = NULL;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
		hw_wp_info = &vcpu->arch.guestdbg.hw_wp_info[i];
		kfree(hw_wp_info->old_data);
		hw_wp_info->old_data = NULL;
	}
	kfree(vcpu->arch.guestdbg.hw_wp_info);
	vcpu->arch.guestdbg.hw_wp_info = NULL;

	kfree(vcpu->arch.guestdbg.hw_bp_info);
	vcpu->arch.guestdbg.hw_bp_info = NULL;

	vcpu->arch.guestdbg.nr_hw_wp = 0;
	vcpu->arch.guestdbg.nr_hw_bp = 0;
}

static inline int in_addr_range(u64 addr, u64 a, u64 b)
{
	if (a <= b)
		return (addr >= a) && (addr <= b);
	else
		/* "overflowing" interval */
		return (addr >= a) || (addr <= b);
}

#define end_of_range(bp_info) (bp_info->addr + bp_info->len - 1)

static struct kvm_hw_bp_info_arch *find_hw_bp(struct kvm_vcpu *vcpu,
					      unsigned long addr)
{
	struct kvm_hw_bp_info_arch *bp_info = vcpu->arch.guestdbg.hw_bp_info;
	int i;

	if (vcpu->arch.guestdbg.nr_hw_bp == 0)
		return NULL;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
		/* addr is directly the start or in the range of a bp */
		if (addr == bp_info->addr)
			goto found;
		if (bp_info->len > 0 &&
		    in_addr_range(addr, bp_info->addr, end_of_range(bp_info)))
			goto found;

		bp_info++;
	}

	return NULL;
found:
	return bp_info;
}
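
/*
 * A PER store event only tells us that something inside the monitored
 * range was written. Find the watchpoint that actually changed by
 * comparing the current guest memory contents with the backed-up data.
 */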
static struct kvm_hw_wp_info_arch *any_wp_changed(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_hw_wp_info_arch *wp_info = NULL;
	void *temp = NULL;

	if (vcpu->arch.guestdbg.nr_hw_wp == 0)
		return NULL;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
		wp_info = &vcpu->arch.guestdbg.hw_wp_info[i];
		if (!wp_info || !wp_info->old_data || wp_info->len <= 0)
			continue;

		temp = kmalloc(wp_info->len, GFP_KERNEL_ACCOUNT);
		if (!temp)
			continue;

		/* refetch the wp data and compare it to the old value */
		if (!read_guest_abs(vcpu, wp_info->phys_addr, temp,
				    wp_info->len)) {
			if (memcmp(temp, wp_info->old_data, wp_info->len)) {
				kfree(temp);
				return wp_info;
			}
		}
		kfree(temp);
		temp = NULL;
	}

	return NULL;
}

void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu)
{
	vcpu->run->exit_reason = KVM_EXIT_DEBUG;
	vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;
}
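
/*
 * The PER code delivered in the program-interruption information is the
 * PER-event mask from cr9, shifted down into a single byte.
 */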
#define PER_CODE_MASK		(PER_EVENT_MASK >> 24)
#define PER_CODE_BRANCH		(PER_EVENT_BRANCH >> 24)
#define PER_CODE_IFETCH		(PER_EVENT_IFETCH >> 24)
#define PER_CODE_STORE		(PER_EVENT_STORE >> 24)
#define PER_CODE_STORE_REAL	(PER_EVENT_STORE_REAL >> 24)

#define per_bp_event(code) \
			(code & (PER_CODE_IFETCH | PER_CODE_BRANCH))
#define per_write_wp_event(code) \
			(code & (PER_CODE_STORE | PER_CODE_STORE_REAL))

static int debug_exit_required(struct kvm_vcpu *vcpu, u8 perc,
			       unsigned long peraddr)
{
	struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch;
	struct kvm_hw_wp_info_arch *wp_info = NULL;
	struct kvm_hw_bp_info_arch *bp_info = NULL;
	unsigned long addr = vcpu->arch.sie_block->gpsw.addr;

	if (guestdbg_hw_bp_enabled(vcpu)) {
		if (per_write_wp_event(perc) &&
		    vcpu->arch.guestdbg.nr_hw_wp > 0) {
			wp_info = any_wp_changed(vcpu);
			if (wp_info) {
				debug_exit->addr = wp_info->addr;
				debug_exit->type = KVM_HW_WP_WRITE;
				goto exit_required;
			}
		}
		if (per_bp_event(perc) &&
		    vcpu->arch.guestdbg.nr_hw_bp > 0) {
			bp_info = find_hw_bp(vcpu, addr);
			/* remove duplicate events if PC==PER address */
			if (bp_info && (addr != peraddr)) {
				debug_exit->addr = addr;
				debug_exit->type = KVM_HW_BP;
				vcpu->arch.guestdbg.last_bp = addr;
				goto exit_required;
			}
			/* breakpoint missed */
			bp_info = find_hw_bp(vcpu, peraddr);
			if (bp_info && vcpu->arch.guestdbg.last_bp != peraddr) {
				debug_exit->addr = peraddr;
				debug_exit->type = KVM_HW_BP;
				goto exit_required;
			}
		}
	}
	if (guestdbg_sstep_enabled(vcpu) && per_bp_event(perc)) {
		debug_exit->addr = addr;
		debug_exit->type = KVM_SINGLESTEP;
		goto exit_required;
	}

	return 0;
exit_required:
	return 1;
}

static int per_fetched_addr(struct kvm_vcpu *vcpu, unsigned long *addr)
{
	u8 exec_ilen = 0;
	u16 opcode[3];
	int rc;

	if (vcpu->arch.sie_block->icptcode == ICPT_PROGI) {
		/* PER address references the fetched or the execute instr */
		*addr = vcpu->arch.sie_block->peraddr;
		/*
		 * Manually detect if we have an EXECUTE instruction. As
		 * instructions are always 2 byte aligned we can read the
		 * first two bytes unconditionally
		 */
		rc = read_guest_instr(vcpu, *addr, &opcode, 2);
		if (rc)
			return rc;
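		/*
		 * EXECUTE has opcode 0x44 (RX-a format, 4 bytes); EXECUTE
		 * RELATIVE LONG has opcode 0xc6 with extended opcode 0x0
		 * (RIL-b format, 6 bytes).
		 */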
		if (opcode[0] >> 8 == 0x44)
			exec_ilen = 4;
		if ((opcode[0] & 0xff0f) == 0xc600)
			exec_ilen = 6;
	} else {
		/* instr was suppressed, calculate the responsible instr */
		*addr = __rewind_psw(vcpu->arch.sie_block->gpsw,
				     kvm_s390_get_ilen(vcpu));
		if (vcpu->arch.sie_block->icptstatus & 0x01) {
			exec_ilen = (vcpu->arch.sie_block->icptstatus & 0x60) >> 4;
			if (!exec_ilen)
				exec_ilen = 4;
		}
	}

	if (exec_ilen) {
		/* read the complete EXECUTE instr to detect the fetched addr */
		rc = read_guest_instr(vcpu, *addr, &opcode, exec_ilen);
		if (rc)
			return rc;
		if (exec_ilen == 6) {
			/* EXECUTE RELATIVE LONG - RIL-b format */
			s32 rl = *((s32 *) (opcode + 1));

			/* rl is a _signed_ 32 bit value specifying halfwords */
			*addr += (u64)(s64) rl * 2;
		} else {
			/* EXECUTE - RX-a format */
			u32 base = (opcode[1] & 0xf000) >> 12;
			u32 disp = opcode[1] & 0x0fff;
			u32 index = opcode[0] & 0x000f;

			*addr = base ? vcpu->run->s.regs.gprs[base] : 0;
			*addr += index ? vcpu->run->s.regs.gprs[index] : 0;
			*addr += disp;
		}
		*addr = kvm_s390_logical_to_effective(vcpu, *addr);
	}
	return 0;
}

#define guest_per_enabled(vcpu) \
			(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER)

int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu)
{
	const u64 cr10 = vcpu->arch.sie_block->gcr[10];
	const u64 cr11 = vcpu->arch.sie_block->gcr[11];
	const u8 ilen = kvm_s390_get_ilen(vcpu);
	struct kvm_s390_pgm_info pgm_info = {
		.code = PGM_PER,
		.per_code = PER_CODE_IFETCH,
		.per_address = __rewind_psw(vcpu->arch.sie_block->gpsw, ilen),
	};
	unsigned long fetched_addr;
	int rc;

	/*
	 * The PSW points to the next instruction, therefore the intercepted
	 * instruction generated a PER i-fetch event. The PER address therefore
	 * points at the previous PSW address (could be an EXECUTE instruction).
	 */
	if (!guestdbg_enabled(vcpu))
		return kvm_s390_inject_prog_irq(vcpu, &pgm_info);

	if (debug_exit_required(vcpu, pgm_info.per_code, pgm_info.per_address))
		vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;

	if (!guest_per_enabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[9] & PER_EVENT_IFETCH))
		return 0;

	rc = per_fetched_addr(vcpu, &fetched_addr);
	if (rc < 0)
		return rc;
	if (rc)
		/* instruction-fetching exceptions */
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	if (in_addr_range(fetched_addr, cr10, cr11))
		return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
	return 0;
}

static int filter_guest_per_event(struct kvm_vcpu *vcpu)
{
	const u8 perc = vcpu->arch.sie_block->perc;
	u64 addr = vcpu->arch.sie_block->gpsw.addr;
	u64 cr9 = vcpu->arch.sie_block->gcr[9];
	u64 cr10 = vcpu->arch.sie_block->gcr[10];
	u64 cr11 = vcpu->arch.sie_block->gcr[11];
	/* only keep the PER events demanded by the guest */
	u8 guest_perc = perc & (cr9 >> 24) & PER_CODE_MASK;
	unsigned long fetched_addr;
	int rc;

	if (!guest_per_enabled(vcpu))
		guest_perc = 0;

	/* filter "successful-branching" events */
	if (guest_perc & PER_CODE_BRANCH &&
	    cr9 & PER_CONTROL_BRANCH_ADDRESS &&
	    !in_addr_range(addr, cr10, cr11))
		guest_perc &= ~PER_CODE_BRANCH;

	/* filter "instruction-fetching" events */
	if (guest_perc & PER_CODE_IFETCH) {
		rc = per_fetched_addr(vcpu, &fetched_addr);
		if (rc < 0)
			return rc;
		/*
		 * Don't inject an irq on exceptions. This would make handling
		 * on icpt code 8 very complex (as PSW was already rewound).
		 */
		if (rc || !in_addr_range(fetched_addr, cr10, cr11))
			guest_perc &= ~PER_CODE_IFETCH;
	}

	/* All other PER events will be given to the guest */
	/* TODO: Check altered address/address space */

	vcpu->arch.sie_block->perc = guest_perc;

	if (!guest_perc)
		vcpu->arch.sie_block->iprcc &= ~PGM_PER;
	return 0;
}
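
/*
 * pssec/hssec: space-switch-event controls in the primary (cr1) and home
 * (cr13) ASCEs; old_ssec/old_as_is_home: properties of the previous
 * address space, taken from the SIE block.
 */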
#define pssec(vcpu) (vcpu->arch.sie_block->gcr[1] & _ASCE_SPACE_SWITCH)
#define hssec(vcpu) (vcpu->arch.sie_block->gcr[13] & _ASCE_SPACE_SWITCH)
#define old_ssec(vcpu) ((vcpu->arch.sie_block->tecmc >> 31) & 0x1)
#define old_as_is_home(vcpu) !(vcpu->arch.sie_block->tecmc & 0xffff)
2016-05-24 10:40:11 +00:00
|
|
|
int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
|
2014-01-23 11:26:52 +00:00
|
|
|
{
|
2016-05-24 10:40:11 +00:00
|
|
|
int rc, new_as;
|
KVM: s390: filter space-switch events when PER is enforced
When guest debugging is active, space-switch events might be enforced
by PER. While the PER events are correctly filtered out,
space-switch-events could be forwarded to the guest, although from a
guest point of view, they should not have been reported.
Therefore we have to filter out space-switch events being concurrently
reported with a PER event, if the PER event got filtered out. To do so,
we theoretically have to know which instruction was responsible for the
event. As the applicable instructions modify the PSW address, the
address space set in the PSW and even the address space in cr1, we
can't figure out the instruction that way.
For this reason, we have to rely on the information about the old and
new address space, in order to guess the responsible instruction type
and do appropriate checks for space-switch events.
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
2015-06-23 20:49:36 +00:00
|
|
|
|
2016-05-24 10:33:52 +00:00
|
|
|
if (debug_exit_required(vcpu, vcpu->arch.sie_block->perc,
|
|
|
|
vcpu->arch.sie_block->peraddr))
|
2014-01-23 11:26:52 +00:00
|
|
|
vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;
|
|
|
|
|
2016-05-24 10:40:11 +00:00
|
|
|
rc = filter_guest_per_event(vcpu);
|
|
|
|
if (rc)
|
|
|
|
return rc;
|
KVM: s390: filter space-switch events when PER is enforced
When guest debugging is active, space-switch events might be enforced
by PER. While the PER events are correctly filtered out,
space-switch-events could be forwarded to the guest, although from a
guest point of view, they should not have been reported.
Therefore we have to filter out space-switch events being concurrently
reported with a PER event, if the PER event got filtered out. To do so,
we theoretically have to know which instruction was responsible for the
event. As the applicable instructions modify the PSW address, the
address space set in the PSW and even the address space in cr1, we
can't figure out the instruction that way.
For this reason, we have to rely on the information about the old and
new address space, in order to guess the responsible instruction type
and do appropriate checks for space-switch events.
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
2015-06-23 20:49:36 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Only RP, SAC, SACF, PT, PTI, PR, PC instructions can trigger
|
|
|
|
* a space-switch event. PER events enforce space-switch events
|
|
|
|
* for these instructions. So if no PER event for the guest is left,
|
|
|
|
* we might have to filter the space-switch element out, too.
|
|
|
|
*/
|
|
|
|
if (vcpu->arch.sie_block->iprcc == PGM_SPACE_SWITCH) {
|
|
|
|
vcpu->arch.sie_block->iprcc = 0;
|
|
|
|
new_as = psw_bits(vcpu->arch.sie_block->gpsw).as;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the AS changed from / to home, we had RP, SAC or SACF
|
|
|
|
* instruction. Check primary and home space-switch-event
|
|
|
|
* controls. (theoretically home -> home produced no event)
|
|
|
|
*/
|
2017-06-03 08:19:55 +00:00
|
|
|
if (((new_as == PSW_BITS_AS_HOME) ^ old_as_is_home(vcpu)) &&
|
|
|
|
(pssec(vcpu) || hssec(vcpu)))
|
KVM: s390: filter space-switch events when PER is enforced
When guest debugging is active, space-switch events might be enforced
by PER. While the PER events are correctly filtered out,
space-switch-events could be forwarded to the guest, although from a
guest point of view, they should not have been reported.
Therefore we have to filter out space-switch events being concurrently
reported with a PER event, if the PER event got filtered out. To do so,
we theoretically have to know which instruction was responsible for the
event. As the applicable instructions modify the PSW address, the
address space set in the PSW and even the address space in cr1, we
can't figure out the instruction that way.
For this reason, we have to rely on the information about the old and
new address space, in order to guess the responsible instruction type
and do appropriate checks for space-switch events.
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
2015-06-23 20:49:36 +00:00
|
|
|
vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* PT, PTI, PR, PC instruction operate on primary AS only. Check
|
|
|
|
* if the primary-space-switch-event control was or got set.
|
|
|
|
*/
|
2017-06-03 08:19:55 +00:00
|
|
|
if (new_as == PSW_BITS_AS_PRIMARY && !old_as_is_home(vcpu) &&
|
KVM: s390: filter space-switch events when PER is enforced
When guest debugging is active, space-switch events might be enforced
by PER. While the PER events are correctly filtered out,
space-switch-events could be forwarded to the guest, although from a
guest point of view, they should not have been reported.
Therefore we have to filter out space-switch events being concurrently
reported with a PER event, if the PER event got filtered out. To do so,
we theoretically have to know which instruction was responsible for the
event. As the applicable instructions modify the PSW address, the
address space set in the PSW and even the address space in cr1, we
can't figure out the instruction that way.
For this reason, we have to rely on the information about the old and
new address space, in order to guess the responsible instruction type
and do appropriate checks for space-switch events.
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
2015-06-23 20:49:36 +00:00
|
|
|
(pssec(vcpu) || old_ssec(vcpu)))
|
|
|
|
vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;
|
|
|
|
}
|
2016-05-24 10:40:11 +00:00
|
|
|
return 0;
|
2014-01-23 11:26:52 +00:00
|
|
|
}
|