commit 8f00067a0d

Merge tag 'kvm-s390-next-4.11-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

KVM: s390: Fixes and features for 4.11 (via kvm/next)

- enable some simd extensions for guests
- enable nx for guests
- debug log for cpu model
- PER fixes
- remove bitwise annotation from ar_t
- detect guests in operation exception program check loops
- fix potential null-pointer dereference for ucontrol guests
- also contains merge for fix that went into 4.10 to avoid conflicts
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -373,7 +373,7 @@ void ipte_unlock(struct kvm_vcpu *vcpu)
 		ipte_unlock_simple(vcpu);
 }
 
-static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
+static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, u8 ar,
 			  enum gacc_mode mode)
 {
 	union alet alet;
@@ -465,7 +465,9 @@ static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
 struct trans_exc_code_bits {
 	unsigned long addr : 52; /* Translation-exception Address */
 	unsigned long fsi : 2;   /* Access Exception Fetch/Store Indication */
-	unsigned long : 6;
+	unsigned long : 2;
+	unsigned long b56 : 1;
+	unsigned long : 3;
 	unsigned long b60 : 1;
 	unsigned long b61 : 1;
 	unsigned long as : 2;    /* ASCE Identifier */
@@ -485,7 +487,7 @@ enum prot_type {
 };
 
 static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
-		     ar_t ar, enum gacc_mode mode, enum prot_type prot)
+		     u8 ar, enum gacc_mode mode, enum prot_type prot)
 {
 	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
 	struct trans_exc_code_bits *tec;
@@ -497,14 +499,18 @@ static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
 	switch (code) {
 	case PGM_PROTECTION:
 		switch (prot) {
+		case PROT_TYPE_LA:
+			tec->b56 = 1;
+			break;
+		case PROT_TYPE_KEYC:
+			tec->b60 = 1;
+			break;
 		case PROT_TYPE_ALC:
 			tec->b60 = 1;
 			/* FALL THROUGH */
 		case PROT_TYPE_DAT:
 			tec->b61 = 1;
 			break;
-		default: /* LA and KEYC set b61 to 0, other params undefined */
-			return code;
 		}
 		/* FALL THROUGH */
 	case PGM_ASCE_TYPE:
@@ -539,7 +545,7 @@ static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
 }
 
 static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
-			 unsigned long ga, ar_t ar, enum gacc_mode mode)
+			 unsigned long ga, u8 ar, enum gacc_mode mode)
 {
 	int rc;
 	struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);
@@ -771,7 +777,7 @@ static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
 	return 1;
 }
 
-static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar,
+static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
 			    unsigned long *pages, unsigned long nr_pages,
 			    const union asce asce, enum gacc_mode mode)
 {
@@ -803,7 +809,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar,
 	return 0;
 }
 
-int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
+int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
 		 unsigned long len, enum gacc_mode mode)
 {
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -877,7 +883,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
  * Note: The IPTE lock is not taken during this function, so the caller
  * has to take care of this.
  */
-int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
+int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
 			    unsigned long *gpa, enum gacc_mode mode)
 {
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -910,7 +916,7 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
 /**
  * check_gva_range - test a range of guest virtual addresses for accessibility
  */
-int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
+int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
 		    unsigned long length, enum gacc_mode mode)
 {
 	unsigned long gpa;
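The trans_exc_code_bits change above splits the old anonymous 6-bit filler so that bit 56 of the translation-exception code becomes addressable; trans_exc() can then report low-address protection (PROT_TYPE_LA) via b56 instead of bailing out through the removed default case. A standalone user-space sketch of the mapping, with illustrative enum values (the real layout additionally depends on s390's big-endian bitfield ordering):

/* Standalone sketch, not kernel code: how protection causes map to TEC bits. */
#include <stdio.h>

struct trans_exc_code_bits {
	unsigned long addr : 52; /* Translation-exception Address */
	unsigned long fsi : 2;   /* Fetch/Store Indication */
	unsigned long : 2;
	unsigned long b56 : 1;   /* low-address protection */
	unsigned long : 3;
	unsigned long b60 : 1;
	unsigned long b61 : 1;
	unsigned long as : 2;    /* ASCE Identifier */
};

enum prot_type { PROT_TYPE_LA, PROT_TYPE_KEYC, PROT_TYPE_ALC, PROT_TYPE_DAT };

static void set_tec_bits(struct trans_exc_code_bits *tec, enum prot_type prot)
{
	switch (prot) {
	case PROT_TYPE_LA:
		tec->b56 = 1;
		break;
	case PROT_TYPE_KEYC:
		tec->b60 = 1;
		break;
	case PROT_TYPE_ALC:
		tec->b60 = 1;
		/* fall through: ALC protection also sets b61, like DAT */
	case PROT_TYPE_DAT:
		tec->b61 = 1;
		break;
	}
}

int main(void)
{
	struct trans_exc_code_bits tec = {0};

	set_tec_bits(&tec, PROT_TYPE_ALC);
	printf("b56=%lu b60=%lu b61=%lu\n", (unsigned long)tec.b56,
	       (unsigned long)tec.b60, (unsigned long)tec.b61);
	return 0;
}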
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -162,11 +162,11 @@ enum gacc_mode {
 };
 
 int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
-			    ar_t ar, unsigned long *gpa, enum gacc_mode mode);
-int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
+			    u8 ar, unsigned long *gpa, enum gacc_mode mode);
+int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
 		    unsigned long length, enum gacc_mode mode);
 
-int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
+int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
 		 unsigned long len, enum gacc_mode mode);
 
 int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
@@ -218,7 +218,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
  * if data has been changed in guest space in case of an exception.
  */
 static inline __must_check
-int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
+int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
 		unsigned long len)
 {
 	return access_guest(vcpu, ga, ar, data, len, GACC_STORE);
@@ -238,7 +238,7 @@ int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
  * data will be copied from guest space to kernel space.
  */
 static inline __must_check
-int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
+int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
 		unsigned long len)
 {
 	return access_guest(vcpu, ga, ar, data, len, GACC_FETCH);
@@ -247,10 +247,11 @@ int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
 /**
  * read_guest_instr - copy instruction data from guest space to kernel space
  * @vcpu: virtual cpu
+ * @ga: guest address
  * @data: destination address in kernel space
  * @len: number of bytes to copy
  *
- * Copy @len bytes from the current psw address (guest space) to @data (kernel
+ * Copy @len bytes from the given address (guest space) to @data (kernel
  * space).
  *
  * The behaviour of read_guest_instr is identical to read_guest, except that
@@ -258,10 +259,10 @@ int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
  * address-space mode.
  */
 static inline __must_check
-int read_guest_instr(struct kvm_vcpu *vcpu, void *data, unsigned long len)
+int read_guest_instr(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+		     unsigned long len)
 {
-	return access_guest(vcpu, vcpu->arch.sie_block->gpsw.addr, 0, data, len,
-			    GACC_IFETCH);
+	return access_guest(vcpu, ga, 0, data, len, GACC_IFETCH);
 }
 
 /**
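With the new read_guest_instr() signature the guest address is an explicit parameter rather than implicitly the current PSW address, so helpers like per_fetched_addr() can fetch instruction text anywhere. A minimal mock, with toy types standing in for the kernel structures, showing the call shape:

#include <stdio.h>
#include <string.h>

/* Toy stand-ins for the kernel types, just to show the call shape. */
struct vcpu_mock {
	unsigned long psw_addr;  /* stands in for gpsw.addr */
	unsigned char mem[64];   /* stands in for guest memory */
};

/* Mirrors the new signature: the guest address @ga is explicit. */
static int read_guest_instr(struct vcpu_mock *vcpu, unsigned long ga,
			    void *data, unsigned long len)
{
	if (ga + len > sizeof(vcpu->mem))
		return -1; /* the real helper returns pgm/-errno codes */
	memcpy(data, vcpu->mem + ga, len);
	return 0;
}

int main(void)
{
	struct vcpu_mock vcpu = { .psw_addr = 4 };
	unsigned short opcode;

	vcpu.mem[4] = 0x44; /* EXECUTE opcode byte, as an example */

	/* old implicit behaviour, now spelled out by the caller: */
	if (!read_guest_instr(&vcpu, vcpu.psw_addr, &opcode, 2))
		printf("opcode halfword: 0x%04x\n", opcode);
	return 0;
}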
--- a/arch/s390/kvm/guestdbg.c
+++ b/arch/s390/kvm/guestdbg.c
@@ -388,14 +388,13 @@ void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu)
 #define per_write_wp_event(code) \
 	(code & (PER_CODE_STORE | PER_CODE_STORE_REAL))
 
-static int debug_exit_required(struct kvm_vcpu *vcpu)
+static int debug_exit_required(struct kvm_vcpu *vcpu, u8 perc,
+			       unsigned long peraddr)
 {
-	u8 perc = vcpu->arch.sie_block->perc;
 	struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch;
 	struct kvm_hw_wp_info_arch *wp_info = NULL;
 	struct kvm_hw_bp_info_arch *bp_info = NULL;
 	unsigned long addr = vcpu->arch.sie_block->gpsw.addr;
-	unsigned long peraddr = vcpu->arch.sie_block->peraddr;
 
 	if (guestdbg_hw_bp_enabled(vcpu)) {
 		if (per_write_wp_event(perc) &&
@@ -437,36 +436,118 @@ exit_required:
 	return 1;
 }
 
+static int per_fetched_addr(struct kvm_vcpu *vcpu, unsigned long *addr)
+{
+	u8 exec_ilen = 0;
+	u16 opcode[3];
+	int rc;
+
+	if (vcpu->arch.sie_block->icptcode == ICPT_PROGI) {
+		/* PER address references the fetched or the execute instr */
+		*addr = vcpu->arch.sie_block->peraddr;
+		/*
+		 * Manually detect if we have an EXECUTE instruction. As
+		 * instructions are always 2 byte aligned we can read the
+		 * first two bytes unconditionally
+		 */
+		rc = read_guest_instr(vcpu, *addr, &opcode, 2);
+		if (rc)
+			return rc;
+		if (opcode[0] >> 8 == 0x44)
+			exec_ilen = 4;
+		if ((opcode[0] & 0xff0f) == 0xc600)
+			exec_ilen = 6;
+	} else {
+		/* instr was suppressed, calculate the responsible instr */
+		*addr = __rewind_psw(vcpu->arch.sie_block->gpsw,
+				     kvm_s390_get_ilen(vcpu));
+		if (vcpu->arch.sie_block->icptstatus & 0x01) {
+			exec_ilen = (vcpu->arch.sie_block->icptstatus & 0x60) >> 4;
+			if (!exec_ilen)
+				exec_ilen = 4;
+		}
+	}
+
+	if (exec_ilen) {
+		/* read the complete EXECUTE instr to detect the fetched addr */
+		rc = read_guest_instr(vcpu, *addr, &opcode, exec_ilen);
+		if (rc)
+			return rc;
+		if (exec_ilen == 6) {
+			/* EXECUTE RELATIVE LONG - RIL-b format */
+			s32 rl = *((s32 *) (opcode + 1));
+
+			/* rl is a _signed_ 32 bit value specifying halfwords */
+			*addr += (u64)(s64) rl * 2;
+		} else {
+			/* EXECUTE - RX-a format */
+			u32 base = (opcode[1] & 0xf000) >> 12;
+			u32 disp = opcode[1] & 0x0fff;
+			u32 index = opcode[0] & 0x000f;
+
+			*addr = base ? vcpu->run->s.regs.gprs[base] : 0;
+			*addr += index ? vcpu->run->s.regs.gprs[index] : 0;
+			*addr += disp;
+		}
+		*addr = kvm_s390_logical_to_effective(vcpu, *addr);
+	}
+	return 0;
+}
+
 #define guest_per_enabled(vcpu) \
 	(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER)
 
 int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu)
 {
+	const u64 cr10 = vcpu->arch.sie_block->gcr[10];
+	const u64 cr11 = vcpu->arch.sie_block->gcr[11];
 	const u8 ilen = kvm_s390_get_ilen(vcpu);
 	struct kvm_s390_pgm_info pgm_info = {
 		.code = PGM_PER,
 		.per_code = PER_CODE_IFETCH,
 		.per_address = __rewind_psw(vcpu->arch.sie_block->gpsw, ilen),
 	};
+	unsigned long fetched_addr;
+	int rc;
 
 	/*
 	 * The PSW points to the next instruction, therefore the intercepted
 	 * instruction generated a PER i-fetch event. PER address therefore
 	 * points at the previous PSW address (could be an EXECUTE function).
 	 */
-	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
+	if (!guestdbg_enabled(vcpu))
+		return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
+
+	if (debug_exit_required(vcpu, pgm_info.per_code, pgm_info.per_address))
+		vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;
+
+	if (!guest_per_enabled(vcpu) ||
+	    !(vcpu->arch.sie_block->gcr[9] & PER_EVENT_IFETCH))
+		return 0;
+
+	rc = per_fetched_addr(vcpu, &fetched_addr);
+	if (rc < 0)
+		return rc;
+	if (rc)
+		/* instruction-fetching exceptions */
+		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+
+	if (in_addr_range(fetched_addr, cr10, cr11))
+		return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
+	return 0;
 }
 
-static void filter_guest_per_event(struct kvm_vcpu *vcpu)
+static int filter_guest_per_event(struct kvm_vcpu *vcpu)
 {
 	const u8 perc = vcpu->arch.sie_block->perc;
-	u64 peraddr = vcpu->arch.sie_block->peraddr;
 	u64 addr = vcpu->arch.sie_block->gpsw.addr;
 	u64 cr9 = vcpu->arch.sie_block->gcr[9];
 	u64 cr10 = vcpu->arch.sie_block->gcr[10];
 	u64 cr11 = vcpu->arch.sie_block->gcr[11];
 	/* filter all events, demanded by the guest */
 	u8 guest_perc = perc & (cr9 >> 24) & PER_CODE_MASK;
+	unsigned long fetched_addr;
+	int rc;
 
 	if (!guest_per_enabled(vcpu))
 		guest_perc = 0;
@@ -478,9 +559,17 @@ static void filter_guest_per_event(struct kvm_vcpu *vcpu)
 		guest_perc &= ~PER_CODE_BRANCH;
 
 	/* filter "instruction-fetching" events */
-	if (guest_perc & PER_CODE_IFETCH &&
-	    !in_addr_range(peraddr, cr10, cr11))
-		guest_perc &= ~PER_CODE_IFETCH;
+	if (guest_perc & PER_CODE_IFETCH) {
+		rc = per_fetched_addr(vcpu, &fetched_addr);
+		if (rc < 0)
+			return rc;
+		/*
+		 * Don't inject an irq on exceptions. This would make handling
+		 * on icpt code 8 very complex (as PSW was already rewound).
+		 */
+		if (rc || !in_addr_range(fetched_addr, cr10, cr11))
+			guest_perc &= ~PER_CODE_IFETCH;
+	}
 
 	/* All other PER events will be given to the guest */
 	/* TODO: Check altered address/address space */
@@ -489,6 +578,7 @@ static void filter_guest_per_event(struct kvm_vcpu *vcpu)
 
 	if (!guest_perc)
 		vcpu->arch.sie_block->iprcc &= ~PGM_PER;
+	return 0;
 }
 
 #define pssec(vcpu) (vcpu->arch.sie_block->gcr[1] & _ASCE_SPACE_SWITCH)
@@ -496,14 +586,17 @@ static void filter_guest_per_event(struct kvm_vcpu *vcpu)
 #define old_ssec(vcpu) ((vcpu->arch.sie_block->tecmc >> 31) & 0x1)
 #define old_as_is_home(vcpu) !(vcpu->arch.sie_block->tecmc & 0xffff)
 
-void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
+int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
 {
-	int new_as;
+	int rc, new_as;
 
-	if (debug_exit_required(vcpu))
+	if (debug_exit_required(vcpu, vcpu->arch.sie_block->perc,
+				vcpu->arch.sie_block->peraddr))
 		vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;
 
-	filter_guest_per_event(vcpu);
+	rc = filter_guest_per_event(vcpu);
+	if (rc)
+		return rc;
 
 	/*
 	 * Only RP, SAC, SACF, PT, PTI, PR, PC instructions can trigger
@@ -532,4 +625,5 @@ void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
 			 (pssec(vcpu) || old_ssec(vcpu)))
 			vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;
 	}
+	return 0;
 }
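per_fetched_addr() above decodes the target of an EXECUTE instruction in two formats: EXECUTE RELATIVE LONG (RIL-b) adds a signed 32-bit halfword offset to the instruction address, while plain EXECUTE (RX-a) sums base register, index register, and a 12-bit displacement. A standalone sketch of that calculation (inputs are made up, and the kernel's final kvm_s390_logical_to_effective() wrapping is omitted); the RIL-b offset is composed here from two big-endian halfwords, which is what the kernel's direct 32-bit load does on s390:

#include <stdio.h>
#include <stdint.h>

/* opcode[] holds instruction halfwords; gprs[] stands in for guest regs. */
static uint64_t exec_target(const uint16_t *opcode, int ilen,
			    const uint64_t *gprs, uint64_t exec_addr)
{
	if (ilen == 6) {
		/* EXECUTE RELATIVE LONG (RIL-b): signed halfword offset */
		int32_t rl = (int32_t)(((uint32_t)opcode[1] << 16) | opcode[2]);

		return exec_addr + (uint64_t)(int64_t)rl * 2;
	}
	/* EXECUTE (RX-a): base + index + displacement */
	uint32_t base = (opcode[1] & 0xf000) >> 12;
	uint32_t disp = opcode[1] & 0x0fff;
	uint32_t index = opcode[0] & 0x000f;
	uint64_t addr = base ? gprs[base] : 0;

	addr += index ? gprs[index] : 0;
	return addr + disp;
}

int main(void)
{
	uint64_t gprs[16] = { [5] = 0x1000, [7] = 0x20 };
	/* EX 1,0x100(7,5): opcode 0x44, X2=7, B2=5, D2=0x100 */
	uint16_t ex[2] = { 0x4417, 0x5100 };
	/* EXRL 1,+4 halfwords: first halfword matches the 0xc600 check */
	uint16_t exrl[3] = { 0xc610, 0x0000, 0x0004 };

	printf("RX-a target:  0x%llx\n",
	       (unsigned long long)exec_target(ex, 4, gprs, 0));
	printf("RIL-b target: 0x%llx\n",
	       (unsigned long long)exec_target(exrl, 6, gprs, 0x3000));
	return 0;
}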
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -238,7 +238,9 @@ static int handle_prog(struct kvm_vcpu *vcpu)
 	vcpu->stat.exit_program_interruption++;
 
 	if (guestdbg_enabled(vcpu) && per_event(vcpu)) {
-		kvm_s390_handle_per_event(vcpu);
+		rc = kvm_s390_handle_per_event(vcpu);
+		if (rc)
+			return rc;
 		/* the interrupt might have been filtered out completely */
 		if (vcpu->arch.sie_block->iprcc == 0)
 			return 0;
@@ -359,6 +361,9 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu)
 
 static int handle_operexc(struct kvm_vcpu *vcpu)
 {
+	psw_t oldpsw, newpsw;
+	int rc;
+
 	vcpu->stat.exit_operation_exception++;
 	trace_kvm_s390_handle_operexc(vcpu, vcpu->arch.sie_block->ipa,
 				      vcpu->arch.sie_block->ipb);
@@ -369,6 +374,24 @@ static int handle_operexc(struct kvm_vcpu *vcpu)
 
 	if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0)
 		return -EOPNOTSUPP;
+	rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &newpsw, sizeof(psw_t));
+	if (rc)
+		return rc;
+	/*
+	 * Avoid endless loops of operation exceptions, if the pgm new
+	 * PSW will cause a new operation exception.
+	 * The heuristic checks if the pgm new psw is within 6 bytes before
+	 * the faulting psw address (with same DAT, AS settings) and the
+	 * new psw is not a wait psw and the fault was not triggered by
+	 * problem state.
+	 */
+	oldpsw = vcpu->arch.sie_block->gpsw;
+	if (oldpsw.addr - newpsw.addr <= 6 &&
+	    !(newpsw.mask & PSW_MASK_WAIT) &&
+	    !(oldpsw.mask & PSW_MASK_PSTATE) &&
+	    (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
+	    (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT))
+		return -EOPNOTSUPP;
 
 	return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
 }
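The handle_operexc() hunk adds a heuristic that refuses to inject further operation exceptions when the program-check new PSW points just before the faulting address with the same DAT/AS settings, since the guest would otherwise loop forever. A standalone sketch of the check (the PSW mask constants match the arch/s390 definitions; psw_t is simplified):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PSW_MASK_DAT    0x0400000000000000UL
#define PSW_MASK_WAIT   0x0002000000000000UL
#define PSW_MASK_PSTATE 0x0001000000000000UL
#define PSW_MASK_ASC    0x0000C00000000000UL

typedef struct { uint64_t mask; uint64_t addr; } psw_t;

/* true when the pgm new PSW would immediately re-raise the exception */
static bool operexc_loop(psw_t oldpsw, psw_t newpsw)
{
	return oldpsw.addr - newpsw.addr <= 6 &&
	       !(newpsw.mask & PSW_MASK_WAIT) &&
	       !(oldpsw.mask & PSW_MASK_PSTATE) &&
	       (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
	       (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT);
}

int main(void)
{
	/* pgm new PSW pointing just before the faulting address => loop */
	psw_t oldpsw = { .mask = 0, .addr = 0x2004 };
	psw_t newpsw = { .mask = 0, .addr = 0x2000 };

	printf("loop detected: %d\n", operexc_loop(oldpsw, newpsw));
	return 0;
}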
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -217,7 +217,7 @@ static void allow_cpu_feat(unsigned long nr)
 static inline int plo_test_bit(unsigned char nr)
 {
 	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
-	int cc = 3; /* subfunction not available */
+	int cc;
 
 	asm volatile(
 		/* Parameter registers are ignored for "test bit" */
@@ -442,6 +442,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	struct kvm_memory_slot *memslot;
 	int is_dirty = 0;
 
+	if (kvm_is_ucontrol(kvm))
+		return -EINVAL;
+
 	mutex_lock(&kvm->slots_lock);
 
 	r = -EINVAL;
@@ -505,6 +508,14 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 		} else if (MACHINE_HAS_VX) {
 			set_kvm_facility(kvm->arch.model.fac_mask, 129);
 			set_kvm_facility(kvm->arch.model.fac_list, 129);
+			if (test_facility(134)) {
+				set_kvm_facility(kvm->arch.model.fac_mask, 134);
+				set_kvm_facility(kvm->arch.model.fac_list, 134);
+			}
+			if (test_facility(135)) {
+				set_kvm_facility(kvm->arch.model.fac_mask, 135);
+				set_kvm_facility(kvm->arch.model.fac_list, 135);
+			}
 			r = 0;
 		} else
 			r = -EINVAL;
@@ -821,6 +832,13 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
 		}
 		memcpy(kvm->arch.model.fac_list, proc->fac_list,
 		       S390_ARCH_FAC_LIST_SIZE_BYTE);
+		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
+			 kvm->arch.model.ibc,
+			 kvm->arch.model.cpuid);
+		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
+			 kvm->arch.model.fac_list[0],
+			 kvm->arch.model.fac_list[1],
+			 kvm->arch.model.fac_list[2]);
 	} else
 		ret = -EFAULT;
 	kfree(proc);
@@ -894,6 +912,13 @@ static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
 	proc->ibc = kvm->arch.model.ibc;
 	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
+	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
+		 kvm->arch.model.ibc,
+		 kvm->arch.model.cpuid);
+	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
+		 kvm->arch.model.fac_list[0],
+		 kvm->arch.model.fac_list[1],
+		 kvm->arch.model.fac_list[2]);
 	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
 		ret = -EFAULT;
 	kfree(proc);
@@ -916,7 +941,18 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
 	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
 	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
-	       S390_ARCH_FAC_LIST_SIZE_BYTE);
+	       sizeof(S390_lowcore.stfle_fac_list));
+	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
+		 kvm->arch.model.ibc,
+		 kvm->arch.model.cpuid);
+	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
+		 mach->fac_mask[0],
+		 mach->fac_mask[1],
+		 mach->fac_mask[2]);
+	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
+		 mach->fac_list[0],
+		 mach->fac_list[1],
+		 mach->fac_list[2]);
 	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
 		ret = -EFAULT;
 	kfree(mach);
@@ -1437,7 +1473,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
 	/* Populate the facility mask initially. */
 	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
-	       S390_ARCH_FAC_LIST_SIZE_BYTE);
+	       sizeof(S390_lowcore.stfle_fac_list));
 	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
 		if (i < kvm_s390_fac_list_mask_size())
 			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
@@ -1938,6 +1974,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 
 	if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
 		vcpu->arch.sie_block->ecb2 |= 0x08;
+	if (test_kvm_facility(vcpu->kvm, 130))
+		vcpu->arch.sie_block->ecb2 |= 0x20;
 	vcpu->arch.sie_block->eca = 0x1002000U;
 	if (sclp.has_cei)
 		vcpu->arch.sie_block->eca |= 0x80000000U;
@@ -2578,7 +2616,7 @@ static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
 	 * to look up the current opcode to get the length of the instruction
 	 * to be able to forward the PSW.
 	 */
-	rc = read_guest_instr(vcpu, &opcode, 1);
+	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
 	ilen = insn_length(opcode);
 	if (rc < 0) {
 		return rc;
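The facility plumbing above turns on STFLE bits 134 and 135 alongside the vector facility (129). As a sketch of what set_kvm_facility()-style helpers do under the hood (an assumption mirroring the arch/s390 facility helpers, not code shown in this diff): facility bit n lives in byte n/8 of the list, counted from the most significant bit of that byte:

#include <stdio.h>

/* Assumed helper shape: set STFLE bit nr in a facility list/mask. */
static void set_facility_bit(unsigned long nr, unsigned long *fac)
{
	unsigned char *ptr = (unsigned char *)fac;

	ptr[nr >> 3] |= 0x80 >> (nr & 7);
}

int main(void)
{
	unsigned long fac[4] = { 0 };

	set_facility_bit(129, fac); /* vector facility */
	set_facility_bit(134, fac);
	set_facility_bit(135, fac);
	/* bits 129, 134, 135 all land in byte 16: 0x40 | 0x02 | 0x01 */
	printf("byte 16 = 0x%02x\n", ((unsigned char *)fac)[16]);
	return 0;
}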
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -86,9 +86,7 @@ static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
 	kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
 }
 
-typedef u8 __bitwise ar_t;
-
-static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, ar_t *ar)
+static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
 {
 	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
 	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
@@ -101,7 +99,7 @@ static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, ar_t *ar)
 
 static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
 					      u64 *address1, u64 *address2,
-					      ar_t *ar_b1, ar_t *ar_b2)
+					      u8 *ar_b1, u8 *ar_b2)
 {
 	u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
 	u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
@@ -125,7 +123,7 @@ static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2
 	*r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
 }
 
-static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, ar_t *ar)
+static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, u8 *ar)
 {
 	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
 	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
@@ -140,7 +138,7 @@ static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, ar_t *ar)
 	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
 }
 
-static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, ar_t *ar)
+static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, u8 *ar)
 {
 	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
 	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
@@ -379,7 +377,7 @@ int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
 void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu);
 void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu);
-void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);
+int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);
 
 /* support for Basic/Extended SCA handling */
 static inline union ipte_control *kvm_s390_get_ipte_control(struct kvm *kvm)
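These inline helpers decode base/displacement operands straight out of the SIE block's ipb copy of the instruction text; for the S format, B2 sits in the top nibble and D2 in the low 12 bits of the second halfword, and B2 doubles as the access register number returned through *ar. A standalone sketch with a made-up ipb value:

#include <stdio.h>
#include <stdint.h>

/* Mirrors kvm_s390_get_base_disp_s() on plain integers. */
static uint64_t get_base_disp_s(uint32_t ipb, const uint64_t *gprs,
				uint8_t *ar)
{
	uint32_t base2 = ipb >> 28;
	uint32_t disp2 = (ipb & 0x0fff0000) >> 16;

	if (ar)
		*ar = base2; /* the access register number equals B2 */
	return (base2 ? gprs[base2] : 0) + disp2;
}

int main(void)
{
	uint64_t gprs[16] = { [3] = 0x8000 };
	uint8_t ar;
	/* ipb: B2 = 3, D2 = 0x123 in the upper halfword */
	uint32_t ipb = (3u << 28) | (0x123u << 16);

	printf("addr = 0x%llx, ar = %u\n",
	       (unsigned long long)get_base_disp_s(ipb, gprs, &ar), ar);
	return 0;
}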
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -54,7 +54,7 @@ int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
 static int handle_set_clock(struct kvm_vcpu *vcpu)
 {
 	int rc;
-	ar_t ar;
+	u8 ar;
 	u64 op2, val;
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
@@ -79,7 +79,7 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
 	u64 operand2;
 	u32 address;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_spx++;
 
@@ -117,7 +117,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
 	u64 operand2;
 	u32 address;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stpx++;
 
@@ -147,7 +147,7 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
 	u16 vcpu_id = vcpu->vcpu_id;
 	u64 ga;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stap++;
 
@@ -380,7 +380,7 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
 	u32 tpi_data[3];
 	int rc;
 	u64 addr;
-	ar_t ar;
+	u8 ar;
 
 	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
 	if (addr & 3)
@@ -548,7 +548,7 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
 	psw_compat_t new_psw;
 	u64 addr;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	if (gpsw->mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -575,7 +575,7 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
 	psw_t new_psw;
 	u64 addr;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -597,7 +597,7 @@ static int handle_stidp(struct kvm_vcpu *vcpu)
 	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
 	u64 operand2;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stidp++;
 
@@ -644,7 +644,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
 	ASCEBC(mem->vm[0].cpi, 16);
 }
 
-static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, ar_t ar,
+static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
 				 u8 fc, u8 sel1, u16 sel2)
 {
 	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
@@ -663,7 +663,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
 	unsigned long mem = 0;
 	u64 operand2;
 	int rc = 0;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stsi++;
 	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);
@@ -970,7 +970,7 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
 	int reg, rc, nr_regs;
 	u32 ctl_array[16];
 	u64 ga;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_lctl++;
 
@@ -1009,7 +1009,7 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
 	int reg, rc, nr_regs;
 	u32 ctl_array[16];
 	u64 ga;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stctl++;
 
@@ -1043,7 +1043,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
 	int reg, rc, nr_regs;
 	u64 ctl_array[16];
 	u64 ga;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_lctlg++;
 
@@ -1081,7 +1081,7 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
 	int reg, rc, nr_regs;
 	u64 ctl_array[16];
 	u64 ga;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stctg++;
 
@@ -1132,7 +1132,7 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
 	unsigned long hva, gpa;
 	int ret = 0, cc = 0;
 	bool writable;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_tprot++;
 
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -324,6 +324,9 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	/* Run-time-Instrumentation */
 	if (test_kvm_facility(vcpu->kvm, 64))
 		scb_s->ecb3 |= scb_o->ecb3 & 0x01U;
+	/* Instruction Execution Prevention */
+	if (test_kvm_facility(vcpu->kvm, 130))
+		scb_s->ecb2 |= scb_o->ecb2 & 0x20U;
 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
 		scb_s->eca |= scb_o->eca & 0x00000001U;
 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
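The vsie hunk forwards the nested guest's IEP enable bit (0x20 in ecb2) into the shadow control block, but only when facility 130 is offered to the guest. A toy sketch of the pattern, with mock types and a mock facility check standing in for the kernel's:

#include <stdio.h>
#include <stdbool.h>

struct scb_mock { unsigned char ecb2; };

/* stand-in: pretend only facility 130 is offered to the guest */
static bool test_kvm_facility_mock(int nr) { return nr == 130; }

static void shadow_ecb2(struct scb_mock *scb_s, const struct scb_mock *scb_o)
{
	/* forward the capability bit only when the facility is offered */
	if (test_kvm_facility_mock(130))
		scb_s->ecb2 |= scb_o->ecb2 & 0x20U; /* IEP enable bit */
}

int main(void)
{
	struct scb_mock scb_o = { .ecb2 = 0x20 }, scb_s = { 0 };

	shadow_ecb2(&scb_s, &scb_o);
	printf("shadow ecb2 = 0x%02x\n", scb_s.ecb2);
	return 0;
}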
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -741,7 +741,7 @@ int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
 
 	pgste_set_unlock(ptep, new);
 	pte_unmap_unlock(ptep, ptl);
-	return 0;
+	return cc;
 }
 EXPORT_SYMBOL(reset_guest_reference_bit);
 
--- a/arch/s390/tools/gen_facilities.c
+++ b/arch/s390/tools/gen_facilities.c
@@ -80,6 +80,8 @@ static struct facility_def facility_defs[] = {
 			76, /* msa extension 3 */
 			77, /* msa extension 4 */
 			78, /* enhanced-DAT 2 */
+			130, /* instruction-execution-protection */
+			131, /* enhanced-SOP 2 and side-effect */
 			-1  /* END */
 		}
 	},