Mirror of https://github.com/torvalds/linux.git
5c83511bdb
Instead of using six globally visible paravirt ops structures, combine
them in a single structure, keeping the original structures as
sub-structures.

This avoids the need to assemble struct paravirt_patch_template at
runtime on the stack each time apply_paravirt() is being called (i.e.
when loading a module).

[ tglx: Made the struct and the initializer tabular for readability sake ]

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: xen-devel@lists.xenproject.org
Cc: virtualization@lists.linux-foundation.org
Cc: akataria@vmware.com
Cc: rusty@rustcorp.com.au
Cc: boris.ostrovsky@oracle.com
Cc: hpa@zytor.com
Link: https://lkml.kernel.org/r/20180828074026.820-9-jgross@suse.com
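The shape of the change is easiest to see in a small example. The sketch below is a standalone, user-space illustration of the idea, not the kernel's actual definitions: the field layout, the native_* helpers and the xen_exit_mmap_stub() override are made up for illustration, and only the names paravirt_patch_template, pv_ops and pv_ops.mmu.exit_mmap come from the commit message and the file shown below.

/* Standalone sketch (not kernel code): all paravirt ops in one structure. */
#include <stdio.h>
#include <stddef.h>

struct mm_struct;                       /* opaque for this sketch */

struct pv_irq_ops {
        void (*irq_disable)(void);
};

struct pv_mmu_ops {
        void (*exit_mmap)(struct mm_struct *mm);
};

/* The formerly separate, globally visible structures become sub-structures. */
struct paravirt_patch_template {
        struct pv_irq_ops irq;
        struct pv_mmu_ops mmu;
};

static void native_irq_disable(void)
{
        puts("native irq_disable");
}

static void native_exit_mmap(struct mm_struct *mm)
{
        (void)mm;
        puts("native exit_mmap");
}

/* Tabular initializer, in the spirit noted in the commit message. */
static struct paravirt_patch_template pv_ops = {
        .irq.irq_disable = native_irq_disable,
        .mmu.exit_mmap   = native_exit_mmap,
};

/* A guest-specific override, analogous to xen_hvm_exit_mmap below. */
static void xen_exit_mmap_stub(struct mm_struct *mm)
{
        (void)mm;
        puts("xen exit_mmap");
}

int main(void)
{
        /* Guest setup code patches a single member of the combined structure... */
        pv_ops.mmu.exit_mmap = xen_exit_mmap_stub;

        /* ...and every caller indirects through pv_ops.<area>.<op>. */
        pv_ops.irq.irq_disable();
        pv_ops.mmu.exit_mmap(NULL);
        return 0;
}

As the commit message states, keeping one global instance means apply_paravirt() no longer has to assemble a struct paravirt_patch_template on the stack each time it runs, e.g. when a module is loaded.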
81 lines
1.7 KiB
C
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/crash_dump.h>

#include <xen/interface/xen.h>
#include <xen/hvm.h>

#include "mmu.h"

#ifdef CONFIG_PROC_VMCORE
/*
 * This function is used in two contexts:
 * - the kdump kernel has to check whether a pfn of the crashed kernel
 *   was a ballooned page. vmcore uses this function to decide
 *   whether to access a pfn of the crashed kernel.
 * - the kexec kernel has to check whether a pfn was ballooned by the
 *   previous kernel. If the pfn is ballooned, handle it properly.
 * Returns 0 if the pfn is not backed by a RAM page; the caller may
 * handle the pfn specially in this case.
 */
static int xen_oldmem_pfn_is_ram(unsigned long pfn)
{
        struct xen_hvm_get_mem_type a = {
                .domid = DOMID_SELF,
                .pfn = pfn,
        };
        int ram;

        if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a))
                return -ENXIO;

        switch (a.mem_type) {
        case HVMMEM_mmio_dm:
                ram = 0;
                break;
        case HVMMEM_ram_rw:
        case HVMMEM_ram_ro:
        default:
                ram = 1;
                break;
        }

        return ram;
}
#endif

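/*
 * Notify Xen that the guest pagetable rooted at mm->pgd is going away,
 * so the hypervisor can clean up any state (e.g. shadow pagetables) it
 * keeps for it.
 */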
static void xen_hvm_exit_mmap(struct mm_struct *mm)
{
        struct xen_hvm_pagetable_dying a;
        int rc;

        a.domid = DOMID_SELF;
        a.gpa = __pa(mm->pgd);
        rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
        WARN_ON_ONCE(rc < 0);
}

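/*
 * Probe for HVMOP_pagetable_dying support by issuing the hypercall
 * against GPA 0 and checking whether the hypervisor rejects it.
 */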
static int is_pagetable_dying_supported(void)
{
        struct xen_hvm_pagetable_dying a;
        int rc = 0;

        a.domid = DOMID_SELF;
        a.gpa = 0x00;
        rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
        if (rc < 0) {
                printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
                return 0;
        }
        return 1;
}

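/*
 * Install the Xen HVM exit_mmap hook in the combined pv_ops structure
 * and, when kdump support is built in, register the helper that lets
 * vmcore ask which pfns of the old kernel are really backed by RAM.
 */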
void __init xen_hvm_init_mmu_ops(void)
{
        if (is_pagetable_dying_supported())
                pv_ops.mmu.exit_mmap = xen_hvm_exit_mmap;
#ifdef CONFIG_PROC_VMCORE
        WARN_ON(register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram));
#endif
}