KVM: s390: Features and Test for 5.11
- memcg accounting for s390-specific parts of kvm and gmap
- selftest for diag318
- new kvm_stat for when async_pf falls back to sync

The selftest also triggers a non-critical bug that is unrelated to diag318; a fix will follow later.

Merge tag 'kvm-s390-next-5.11-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD
commit e8614e5e8d
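The memcg accounting changes below all follow one pattern: allocations tied to a guest's lifetime switch from GFP_KERNEL to GFP_KERNEL_ACCOUNT (or gain __GFP_ACCOUNT for atomic allocations), so the memory is charged to the cgroup of the process that owns the VM. A minimal sketch of that pattern, using a hypothetical kvm_s390_example_alloc() helper that is not part of this series:

#include <linux/gfp.h>
#include <linux/slab.h>

/*
 * Hypothetical helper, for illustration only: allocate a per-VM buffer and
 * charge it to the caller's memory cgroup.
 */
static void *kvm_s390_example_alloc(size_t len)
{
	/* GFP_KERNEL_ACCOUNT is GFP_KERNEL | __GFP_ACCOUNT */
	return kzalloc(len, GFP_KERNEL_ACCOUNT);
}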
@@ -459,6 +459,7 @@ struct kvm_vcpu_stat {
 	u64 diagnose_308;
 	u64 diagnose_500;
 	u64 diagnose_other;
+	u64 pfault_sync;
 };

 #define PGM_OPERATION 0x01
@@ -184,7 +184,7 @@ static int __import_wp_info(struct kvm_vcpu *vcpu,
 	if (wp_info->len < 0 || wp_info->len > MAX_WP_SIZE)
 		return -EINVAL;

-	wp_info->old_data = kmalloc(bp_data->len, GFP_KERNEL);
+	wp_info->old_data = kmalloc(bp_data->len, GFP_KERNEL_ACCOUNT);
 	if (!wp_info->old_data)
 		return -ENOMEM;
 	/* try to backup the original value */
@@ -234,7 +234,7 @@ int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
 	if (nr_wp > 0) {
 		wp_info = kmalloc_array(nr_wp,
 					sizeof(*wp_info),
-					GFP_KERNEL);
+					GFP_KERNEL_ACCOUNT);
 		if (!wp_info) {
 			ret = -ENOMEM;
 			goto error;
@@ -243,7 +243,7 @@ int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
 	if (nr_bp > 0) {
 		bp_info = kmalloc_array(nr_bp,
 					sizeof(*bp_info),
-					GFP_KERNEL);
+					GFP_KERNEL_ACCOUNT);
 		if (!bp_info) {
 			ret = -ENOMEM;
 			goto error;
@@ -349,7 +349,7 @@ static struct kvm_hw_wp_info_arch *any_wp_changed(struct kvm_vcpu *vcpu)
 		if (!wp_info || !wp_info->old_data || wp_info->len <= 0)
 			continue;

-		temp = kmalloc(wp_info->len, GFP_KERNEL);
+		temp = kmalloc(wp_info->len, GFP_KERNEL_ACCOUNT);
 		if (!temp)
 			continue;

@@ -398,7 +398,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
 	if (!kvm_s390_pv_cpu_is_protected(vcpu) && (addr & ~PAGE_MASK))
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

-	sctns = (void *)get_zeroed_page(GFP_KERNEL);
+	sctns = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
 	if (!sctns)
 		return -ENOMEM;

@@ -1792,7 +1792,7 @@ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
 		goto out;
 	}
 gisa_out:
-	tmp_inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+	tmp_inti = kzalloc(sizeof(*inti), GFP_KERNEL_ACCOUNT);
 	if (tmp_inti) {
 		tmp_inti->type = KVM_S390_INT_IO(1, 0, 0, 0);
 		tmp_inti->io.io_int_word = isc_to_int_word(isc);
@@ -2015,7 +2015,7 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 	struct kvm_s390_interrupt_info *inti;
 	int rc;

-	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+	inti = kzalloc(sizeof(*inti), GFP_KERNEL_ACCOUNT);
 	if (!inti)
 		return -ENOMEM;

@@ -2414,7 +2414,7 @@ static int enqueue_floating_irq(struct kvm_device *dev,
 		return -EINVAL;

 	while (len >= sizeof(struct kvm_s390_irq)) {
-		inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+		inti = kzalloc(sizeof(*inti), GFP_KERNEL_ACCOUNT);
 		if (!inti)
 			return -ENOMEM;

@@ -2462,7 +2462,7 @@ static int register_io_adapter(struct kvm_device *dev,
 	if (dev->kvm->arch.adapters[adapter_info.id] != NULL)
 		return -EINVAL;

-	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL_ACCOUNT);
 	if (!adapter)
 		return -ENOMEM;

@@ -3290,7 +3290,7 @@ int kvm_s390_gib_init(u8 nisc)
 		goto out;
 	}

-	gib = (struct kvm_s390_gib *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	gib = (struct kvm_s390_gib *)get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
 	if (!gib) {
 		rc = -ENOMEM;
 		goto out;
@@ -60,6 +60,7 @@
 struct kvm_stats_debugfs_item debugfs_entries[] = {
 	VCPU_STAT("userspace_handled", exit_userspace),
 	VCPU_STAT("exit_null", exit_null),
+	VCPU_STAT("pfault_sync", pfault_sync),
 	VCPU_STAT("exit_validity", exit_validity),
 	VCPU_STAT("exit_stop_request", exit_stop_request),
 	VCPU_STAT("exit_external_request", exit_external_request),
@@ -1254,7 +1255,7 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
 		ret = -EBUSY;
 		goto out;
 	}
-	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
 	if (!proc) {
 		ret = -ENOMEM;
 		goto out;
@@ -1416,7 +1417,7 @@ static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
 	struct kvm_s390_vm_cpu_processor *proc;
 	int ret = 0;

-	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
 	if (!proc) {
 		ret = -ENOMEM;
 		goto out;
@@ -1444,7 +1445,7 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
 	struct kvm_s390_vm_cpu_machine *mach;
 	int ret = 0;

-	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
+	mach = kzalloc(sizeof(*mach), GFP_KERNEL_ACCOUNT);
 	if (!mach) {
 		ret = -ENOMEM;
 		goto out;
@@ -1812,7 +1813,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
 	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
 		return -EINVAL;

-	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
+	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
 	if (!keys)
 		return -ENOMEM;

@@ -1857,7 +1858,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
 	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
 		return -EINVAL;

-	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
+	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
 	if (!keys)
 		return -ENOMEM;

@@ -2625,7 +2626,7 @@ static void sca_dispose(struct kvm *kvm)

 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
-	gfp_t alloc_flags = GFP_KERNEL;
+	gfp_t alloc_flags = GFP_KERNEL_ACCOUNT;
 	int i, rc;
 	char debug_name[16];
 	static unsigned long sca_offset;
@@ -2670,7 +2671,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)

 	BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
 	kvm->arch.sie_page2 =
-	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
 	if (!kvm->arch.sie_page2)
 		goto out_err;

@@ -2900,7 +2901,7 @@ static int sca_switch_to_extended(struct kvm *kvm)
 	if (kvm->arch.use_esca)
 		return 0;

-	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
+	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL_ACCOUNT | __GFP_ZERO);
 	if (!new_sca)
 		return -ENOMEM;

@@ -3133,7 +3134,7 @@ void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)

 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
+	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL_ACCOUNT);
 	if (!vcpu->arch.sie_block->cbrlo)
 		return -ENOMEM;
 	return 0;
@@ -3243,7 +3244,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	int rc;

 	BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
-	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
+	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL_ACCOUNT);
 	if (!sie_page)
 		return -ENOMEM;

@@ -4111,6 +4112,7 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
 		current->thread.gmap_pfault = 0;
 		if (kvm_arch_setup_async_pf(vcpu))
 			return 0;
+		vcpu->stat.pfault_sync++;
 		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
 	}
 	return vcpu_post_run_fault_in_sie(vcpu);
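The new counter surfaces through the usual vcpu stat plumbing, so kvm_stat (or plain debugfs) can report how often an async page fault had to fall back to synchronous fault-in. A minimal host-side sketch, assuming debugfs is mounted at /sys/kernel/debug and the aggregate per-stat files that 5.11-era KVM exposes there:

#include <stdio.h>

/*
 * Illustrative only: read the aggregate pfault_sync counter exposed by KVM's
 * debugfs (one file per entry in debugfs_entries[]). Requires root and a
 * mounted debugfs; the path is an assumption about the host environment.
 */
int main(void)
{
	unsigned long long pfault_sync = 0;
	FILE *f = fopen("/sys/kernel/debug/kvm/pfault_sync", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%llu", &pfault_sync) != 1)
		pfault_sync = 0;
	fclose(f);
	printf("async_pf fell back to sync %llu times\n", pfault_sync);
	return 0;
}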
@@ -879,7 +879,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
 	switch (fc) {
 	case 1: /* same handling for 1 and 2 */
 	case 2:
-		mem = get_zeroed_page(GFP_KERNEL);
+		mem = get_zeroed_page(GFP_KERNEL_ACCOUNT);
 		if (!mem)
 			goto out_no_data;
 		if (stsi((void *) mem, fc, sel1, sel2))
@@ -888,7 +888,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
 	case 3:
 		if (sel1 != 2 || sel2 != 2)
 			goto out_no_data;
-		mem = get_zeroed_page(GFP_KERNEL);
+		mem = get_zeroed_page(GFP_KERNEL_ACCOUNT);
 		if (!mem)
 			goto out_no_data;
 		handle_stsi_3_2_2(vcpu, (void *) mem);
@@ -60,7 +60,7 @@ int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
 	if (kvm_s390_pv_cpu_get_handle(vcpu))
 		return -EINVAL;

-	vcpu->arch.pv.stor_base = __get_free_pages(GFP_KERNEL,
+	vcpu->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT,
 						   get_order(uv_info.guest_cpu_stor_len));
 	if (!vcpu->arch.pv.stor_base)
 		return -ENOMEM;
@@ -72,7 +72,7 @@ int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
 	uvcb.stor_origin = (u64)vcpu->arch.pv.stor_base;

 	/* Alloc Secure Instruction Data Area Designation */
-	vcpu->arch.sie_block->sidad = __get_free_page(GFP_KERNEL | __GFP_ZERO);
+	vcpu->arch.sie_block->sidad = __get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
 	if (!vcpu->arch.sie_block->sidad) {
 		free_pages(vcpu->arch.pv.stor_base,
 			   get_order(uv_info.guest_cpu_stor_len));
@@ -120,7 +120,7 @@ static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
 	struct kvm_memory_slot *memslot;

 	kvm->arch.pv.stor_var = NULL;
-	kvm->arch.pv.stor_base = __get_free_pages(GFP_KERNEL, get_order(base));
+	kvm->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT, get_order(base));
 	if (!kvm->arch.pv.stor_base)
 		return -ENOMEM;

@@ -1234,7 +1234,7 @@ static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)

 	mutex_lock(&kvm->arch.vsie.mutex);
 	if (kvm->arch.vsie.page_count < nr_vcpus) {
-		page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA);
+		page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO | GFP_DMA);
 		if (!page) {
 			mutex_unlock(&kvm->arch.vsie.mutex);
 			return ERR_PTR(-ENOMEM);
@@ -1336,7 +1336,7 @@ out_put:
 void kvm_s390_vsie_init(struct kvm *kvm)
 {
 	mutex_init(&kvm->arch.vsie.mutex);
-	INIT_RADIX_TREE(&kvm->arch.vsie.addr_to_page, GFP_KERNEL);
+	INIT_RADIX_TREE(&kvm->arch.vsie.addr_to_page, GFP_KERNEL_ACCOUNT);
 }

 /* Destroy the vsie data structures. To be called when a vm is destroyed. */
@@ -2,7 +2,7 @@
 /*
  * KVM guest address space mapping code
  *
- * Copyright IBM Corp. 2007, 2016, 2018
+ * Copyright IBM Corp. 2007, 2020
  * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  *		 David Hildenbrand <david@redhat.com>
  *		 Janosch Frank <frankja@linux.vnet.ibm.com>
@@ -56,19 +56,19 @@ static struct gmap *gmap_alloc(unsigned long limit)
 		atype = _ASCE_TYPE_REGION1;
 		etype = _REGION1_ENTRY_EMPTY;
 	}
-	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
+	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL_ACCOUNT);
 	if (!gmap)
 		goto out;
 	INIT_LIST_HEAD(&gmap->crst_list);
 	INIT_LIST_HEAD(&gmap->children);
 	INIT_LIST_HEAD(&gmap->pt_list);
-	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
-	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
-	INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
+	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL_ACCOUNT);
+	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC | __GFP_ACCOUNT);
+	INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC | __GFP_ACCOUNT);
 	spin_lock_init(&gmap->guest_table_lock);
 	spin_lock_init(&gmap->shadow_lock);
 	refcount_set(&gmap->ref_count, 1);
-	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
+	page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
 	if (!page)
 		goto out_free;
 	page->index = 0;
@@ -309,7 +309,7 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
 	unsigned long *new;

 	/* since we dont free the gmap table until gmap_free we can unlock */
-	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
+	page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
 	if (!page)
 		return -ENOMEM;
 	new = (unsigned long *) page_to_phys(page);
@@ -594,7 +594,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
 	if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
 		return -EFAULT;
 	/* Link gmap segment table entry location to page table. */
-	rc = radix_tree_preload(GFP_KERNEL);
+	rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
 	if (rc)
 		return rc;
 	ptl = pmd_lock(mm, pmd);
@@ -1218,11 +1218,11 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
 		vmaddr = __gmap_translate(parent, paddr);
 		if (IS_ERR_VALUE(vmaddr))
 			return vmaddr;
-		rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
+		rmap = kzalloc(sizeof(*rmap), GFP_KERNEL_ACCOUNT);
 		if (!rmap)
 			return -ENOMEM;
 		rmap->raddr = raddr;
-		rc = radix_tree_preload(GFP_KERNEL);
+		rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
 		if (rc) {
 			kfree(rmap);
 			return rc;
@@ -1741,7 +1741,7 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,

 	BUG_ON(!gmap_is_shadow(sg));
 	/* Allocate a shadow region second table */
-	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
+	page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
 	if (!page)
 		return -ENOMEM;
 	page->index = r2t & _REGION_ENTRY_ORIGIN;
@@ -1825,7 +1825,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,

 	BUG_ON(!gmap_is_shadow(sg));
 	/* Allocate a shadow region second table */
-	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
+	page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
 	if (!page)
 		return -ENOMEM;
 	page->index = r3t & _REGION_ENTRY_ORIGIN;
@@ -1909,7 +1909,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,

 	BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
 	/* Allocate a shadow segment table */
-	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
+	page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
 	if (!page)
 		return -ENOMEM;
 	page->index = sgt & _REGION_ENTRY_ORIGIN;
@@ -2116,7 +2116,7 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
 	parent = sg->parent;
 	prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;

-	rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
+	rmap = kzalloc(sizeof(*rmap), GFP_KERNEL_ACCOUNT);
 	if (!rmap)
 		return -ENOMEM;
 	rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;
@@ -2128,7 +2128,7 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
 			rc = vmaddr;
 			break;
 		}
-		rc = radix_tree_preload(GFP_KERNEL);
+		rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
 		if (rc)
 			break;
 		rc = -EAGAIN;
@@ -36,7 +36,7 @@ endif
 LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c lib/test_util.c
 LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c lib/x86_64/handlers.S
 LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c
-LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c
+LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c lib/s390x/diag318_test_handler.c

 TEST_GEN_PROGS_x86_64 = x86_64/cr4_cpuid_sync_test
 TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Test handler for the s390x DIAGNOSE 0x0318 instruction.
+ *
+ * Copyright (C) 2020, IBM
+ */
+
+#ifndef SELFTEST_KVM_DIAG318_TEST_HANDLER
+#define SELFTEST_KVM_DIAG318_TEST_HANDLER
+
+uint64_t get_diag318_info(void);
+
+#endif
tools/testing/selftests/kvm/lib/s390x/diag318_test_handler.c (new file, 82 lines)
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Test handler for the s390x DIAGNOSE 0x0318 instruction.
+ *
+ * Copyright (C) 2020, IBM
+ */
+
+#include "test_util.h"
+#include "kvm_util.h"
+
+#define VCPU_ID	6
+
+#define ICPT_INSTRUCTION	0x04
+#define IPA0_DIAG		0x8300
+
+static void guest_code(void)
+{
+	uint64_t diag318_info = 0x12345678;
+
+	asm volatile ("diag %0,0,0x318\n" : : "d" (diag318_info));
+}
+
+/*
+ * The DIAGNOSE 0x0318 instruction call must be handled via userspace. As such,
+ * we create an ad-hoc VM here to handle the instruction then extract the
+ * necessary data. It is up to the caller to decide what to do with that data.
+ */
+static uint64_t diag318_handler(void)
+{
+	struct kvm_vm *vm;
+	struct kvm_run *run;
+	uint64_t reg;
+	uint64_t diag318_info;
+
+	vm = vm_create_default(VCPU_ID, 0, guest_code);
+	vcpu_run(vm, VCPU_ID);
+	run = vcpu_state(vm, VCPU_ID);
+
+	TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,
+		    "DIAGNOSE 0x0318 instruction was not intercepted");
+	TEST_ASSERT(run->s390_sieic.icptcode == ICPT_INSTRUCTION,
+		    "Unexpected intercept code: 0x%x", run->s390_sieic.icptcode);
+	TEST_ASSERT((run->s390_sieic.ipa & 0xff00) == IPA0_DIAG,
+		    "Unexpected IPA0 code: 0x%x", (run->s390_sieic.ipa & 0xff00));
+
+	reg = (run->s390_sieic.ipa & 0x00f0) >> 4;
+	diag318_info = run->s.regs.gprs[reg];
+
+	TEST_ASSERT(diag318_info != 0, "DIAGNOSE 0x0318 info not set");
+
+	kvm_vm_free(vm);
+
+	return diag318_info;
+}
+
+uint64_t get_diag318_info(void)
+{
+	static uint64_t diag318_info;
+	static bool printed_skip;
+
+	/*
+	 * If KVM does not support diag318, then return 0 to
+	 * ensure tests do not break.
+	 */
+	if (!kvm_check_cap(KVM_CAP_S390_DIAG318)) {
+		if (!printed_skip) {
+			fprintf(stdout, "KVM_CAP_S390_DIAG318 not supported. "
+				"Skipping diag318 test.\n");
+			printed_skip = true;
+		}
+		return 0;
+	}
+
+	/*
+	 * If a test has previously requested the diag318 info,
+	 * then don't bother spinning up a temporary VM again.
+	 */
+	if (!diag318_info)
+		diag318_info = diag318_handler();
+
+	return diag318_info;
+}
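A minimal sketch of how a selftest can consume this helper; the sync-regs selftest changes below show the actual in-tree usage, and the vcpu_state()/KVM_SYNC_DIAG318 plumbing here is taken from that diff. The set_diag318_sync_reg() wrapper itself is illustrative, not part of the patch:

#include "kvm_util.h"
#include "diag318_test_handler.h"

/* Illustrative only: mirror the diag318 value into a vCPU's sync regs. */
static void set_diag318_sync_reg(struct kvm_vm *vm, uint32_t vcpu_id)
{
	struct kvm_run *run = vcpu_state(vm, vcpu_id);
	uint64_t info = get_diag318_info();

	/* get_diag318_info() returns 0 when KVM_CAP_S390_DIAG318 is missing. */
	if (info > 0) {
		run->s.regs.diag318 = info;
		run->kvm_dirty_regs |= KVM_SYNC_DIAG318;
	}
}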
@@ -20,6 +20,7 @@

 #include "test_util.h"
 #include "kvm_util.h"
+#include "diag318_test_handler.h"

 #define VCPU_ID 5

@@ -70,7 +71,7 @@ static void compare_sregs(struct kvm_sregs *left, struct kvm_sync_regs *right)

 #undef REG_COMPARE

-#define TEST_SYNC_FIELDS   (KVM_SYNC_GPRS|KVM_SYNC_ACRS|KVM_SYNC_CRS)
+#define TEST_SYNC_FIELDS   (KVM_SYNC_GPRS|KVM_SYNC_ACRS|KVM_SYNC_CRS|KVM_SYNC_DIAG318)
 #define INVALID_SYNC_FIELD 0x80000000

 int main(int argc, char *argv[])
@@ -152,6 +153,12 @@ int main(int argc, char *argv[])

 	run->kvm_valid_regs = TEST_SYNC_FIELDS;
 	run->kvm_dirty_regs = KVM_SYNC_GPRS | KVM_SYNC_ACRS;
+
+	if (get_diag318_info() > 0) {
+		run->s.regs.diag318 = get_diag318_info();
+		run->kvm_dirty_regs |= KVM_SYNC_DIAG318;
+	}
+
 	rv = _vcpu_run(vm, VCPU_ID);
 	TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv);
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,
@@ -164,6 +171,9 @@ int main(int argc, char *argv[])
 	TEST_ASSERT(run->s.regs.acrs[0] == 1 << 11,
 		    "acr0 sync regs value incorrect 0x%x.",
 		    run->s.regs.acrs[0]);
+	TEST_ASSERT(run->s.regs.diag318 == get_diag318_info(),
+		    "diag318 sync regs value incorrect 0x%llx.",
+		    run->s.regs.diag318);

 	vcpu_regs_get(vm, VCPU_ID, &regs);
 	compare_regs(&regs, &run->s.regs);
@@ -177,6 +187,7 @@ int main(int argc, char *argv[])
 	run->kvm_valid_regs = TEST_SYNC_FIELDS;
 	run->kvm_dirty_regs = 0;
 	run->s.regs.gprs[11] = 0xDEADBEEF;
+	run->s.regs.diag318 = 0x4B1D;
 	rv = _vcpu_run(vm, VCPU_ID);
 	TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv);
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,
@@ -186,6 +197,9 @@ int main(int argc, char *argv[])
 	TEST_ASSERT(run->s.regs.gprs[11] != 0xDEADBEEF,
 		    "r11 sync regs value incorrect 0x%llx.",
 		    run->s.regs.gprs[11]);
+	TEST_ASSERT(run->s.regs.diag318 != 0x4B1D,
+		    "diag318 sync regs value incorrect 0x%llx.",
+		    run->s.regs.diag318);

 	kvm_vm_free(vm);