commit 2ce802f62b

    During early boot, local IRQ is disabled until IRQ subsystem is
    properly initialized.  During this time, no one should enable local
    IRQ and some operations which usually are not allowed with IRQ
    disabled, e.g. operations which might sleep or require communications
    with other processors, are allowed.

    lockdep tracked this with early_boot_irqs_off/on() callbacks.  As
    other subsystems need this information too, move it to init/main.c
    and make it generally available.  While at it, toggle the boolean to
    early_boot_irqs_disabled instead of enabled so that it can be
    initialized with %false and %true indicates the exceptional condition.

    Signed-off-by: Tejun Heo <tj@kernel.org>
    Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
    Acked-by: Pekka Enberg <penberg@kernel.org>
    Cc: Linus Torvalds <torvalds@linux-foundation.org>
    LKML-Reference: <20110120110635.GB6036@htj.dyndns.org>
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
/*
 * Core of Xen paravirt_ops implementation.
 *
 * This file contains the xen_paravirt_ops structure itself, and the
 * implementations for:
 * - privileged instructions
 * - interrupt flags
 * - segment operations
 * - booting and setup
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/preempt.h>
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/start_kernel.h>
#include <linux/sched.h>
#include <linux/kprobes.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/memblock.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h>
#include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/memory.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/hvm.h>
#include <xen/hvc-console.h>

#include <asm/paravirt.h>
#include <asm/apic.h>
#include <asm/page.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/msr-index.h>
#include <asm/traps.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/reboot.h>
#include <asm/stackprotector.h>
#include <asm/hypervisor.h>

#include "xen-ops.h"
#include "mmu.h"
#include "multicalls.h"

EXPORT_SYMBOL_GPL(hypercall_page);

DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);

enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);

unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
EXPORT_SYMBOL(machine_to_phys_mapping);
unsigned int   machine_to_phys_order;
EXPORT_SYMBOL(machine_to_phys_order);

struct start_info *xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);

struct shared_info xen_dummy_shared_info;

void *xen_initial_gdt;

RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
__read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);

/*
 * Point at some empty memory to start with. We map the real shared_info
 * page as soon as fixmap is up and running.
 */
struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;

/*
 * Flag to determine whether vcpu info placement is available on all
 * VCPUs.  We assume it is to start with, and then set it to zero on
 * the first failure.  This is because it can succeed on some VCPUs
 * and not others, since it can involve hypervisor memory allocation,
 * or because the guest failed to guarantee all the appropriate
 * constraints on all VCPUs (ie buffer can't cross a page boundary).
 *
 * Note that any particular CPU may be using a placed vcpu structure,
 * but we can only optimise if they all are.
 *
 * 0: not available, 1: available
 */
static int have_vcpu_info_placement = 1;

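/*
 * Without vcpu_info placement the kernel can only reach the vcpu_info
 * slots embedded in the shared_info page, and there are only
 * MAX_VIRT_CPUS of those, so cap the number of CPUs we will bring up.
 */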
static void clamp_max_cpus(void)
{
#ifdef CONFIG_SMP
	if (setup_max_cpus > MAX_VIRT_CPUS)
		setup_max_cpus = MAX_VIRT_CPUS;
#endif
}

static void xen_vcpu_setup(int cpu)
{
	struct vcpu_register_vcpu_info info;
	int err;
	struct vcpu_info *vcpup;

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	if (cpu < MAX_VIRT_CPUS)
		per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];

	if (!have_vcpu_info_placement) {
		if (cpu >= MAX_VIRT_CPUS)
			clamp_max_cpus();
		return;
	}

	vcpup = &per_cpu(xen_vcpu_info, cpu);
	info.mfn = arbitrary_virt_to_mfn(vcpup);
	info.offset = offset_in_page(vcpup);

	/* Check to see if the hypervisor will put the vcpu_info
	   structure where we want it, which allows direct access via
	   a percpu-variable. */
	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);

	if (err) {
		printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
		have_vcpu_info_placement = 0;
		clamp_max_cpus();
	} else {
		/* This cpu is using the registered vcpu info, even if
		   later ones fail to. */
		per_cpu(xen_vcpu, cpu) = vcpup;
	}
}

/*
 * On restore, set the vcpu placement up again.
 * If it fails, then we're in a bad state, since
 * we can't back out from using it...
 */
void xen_vcpu_restore(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		bool other_cpu = (cpu != smp_processor_id());

		if (other_cpu &&
		    HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL))
			BUG();

		xen_setup_runstate_info(cpu);

		if (have_vcpu_info_placement)
			xen_vcpu_setup(cpu);

		if (other_cpu &&
		    HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL))
			BUG();
	}
}

static void __init xen_banner(void)
{
	unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL);
	struct xen_extraversion extra;
	HYPERVISOR_xen_version(XENVER_extraversion, &extra);

	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
	       pv_info.name);
	printk(KERN_INFO "Xen version: %d.%d%s%s\n",
	       version >> 16, version & 0xffff, extra.extraversion,
	       xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
}

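/*
 * CPUID leaf 1 feature masks applied in xen_cpuid() below: bits cleared
 * here are hidden from the kernel so that subsystems the PV guest cannot
 * drive (or that the hypervisor owns) never get initialized.
 */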
static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0;
static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0;

static void xen_cpuid(unsigned int *ax, unsigned int *bx,
		      unsigned int *cx, unsigned int *dx)
{
	unsigned maskebx = ~0;
	unsigned maskecx = ~0;
	unsigned maskedx = ~0;

	/*
	 * Mask out inconvenient features, to try and disable as many
	 * unsupported kernel subsystems as possible.
	 */
	switch (*ax) {
	case 1:
		maskecx = cpuid_leaf1_ecx_mask;
		maskedx = cpuid_leaf1_edx_mask;
		break;

	case 0xb:
		/* Suppress extended topology stuff */
		maskebx = 0;
		break;
	}

	asm(XEN_EMULATE_PREFIX "cpuid"
		: "=a" (*ax),
		  "=b" (*bx),
		  "=c" (*cx),
		  "=d" (*dx)
		: "0" (*ax), "2" (*cx));

	*bx &= maskebx;
	*cx &= maskecx;
	*dx &= maskedx;
}

static __init void xen_init_cpuid_mask(void)
{
	unsigned int ax, bx, cx, dx;

	cpuid_leaf1_edx_mask =
		~((1 << X86_FEATURE_MCE)  |  /* disable MCE */
		  (1 << X86_FEATURE_MCA)  |  /* disable MCA */
		  (1 << X86_FEATURE_MTRR) |  /* disable MTRR */
		  (1 << X86_FEATURE_ACC));   /* thermal monitoring */

	if (!xen_initial_domain())
		cpuid_leaf1_edx_mask &=
			~((1 << X86_FEATURE_APIC) |  /* disable local APIC */
			  (1 << X86_FEATURE_ACPI));  /* disable ACPI */

	ax = 1;
	cx = 0;
	xen_cpuid(&ax, &bx, &cx, &dx);

	/* cpuid claims we support xsave; try enabling it to see what happens */
	if (cx & (1 << (X86_FEATURE_XSAVE % 32))) {
		unsigned long cr4;

		set_in_cr4(X86_CR4_OSXSAVE);

		cr4 = read_cr4();

		if ((cr4 & X86_CR4_OSXSAVE) == 0)
			cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_XSAVE % 32));

		clear_in_cr4(X86_CR4_OSXSAVE);
	}
}

static void xen_set_debugreg(int reg, unsigned long val)
{
	HYPERVISOR_set_debugreg(reg, val);
}

static unsigned long xen_get_debugreg(int reg)
{
	return HYPERVISOR_get_debugreg(reg);
}

static void xen_end_context_switch(struct task_struct *next)
{
	xen_mc_flush();
	paravirt_end_context_switch(next);
}

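/*
 * Xen manages the TSS for a PV guest, so there is no task register value
 * the guest could usefully read back; just report 0.
 */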
static unsigned long xen_store_tr(void)
{
	return 0;
}

/*
 * Set the page permissions for a particular virtual address.  If the
 * address is a vmalloc mapping (or other non-linear mapping), then
 * find the linear mapping of the page and also set its protections to
 * match.
 */
static void set_aliased_prot(void *v, pgprot_t prot)
{
	int level;
	pte_t *ptep;
	pte_t pte;
	unsigned long pfn;
	struct page *page;

	ptep = lookup_address((unsigned long)v, &level);
	BUG_ON(ptep == NULL);

	pfn = pte_pfn(*ptep);
	page = pfn_to_page(pfn);

	pte = pfn_pte(pfn, prot);

	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
		BUG();

	if (!PageHighMem(page)) {
		void *av = __va(PFN_PHYS(pfn));

		if (av != v)
			if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
				BUG();
	} else
		kmap_flush_unused();
}

static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
	int i;

	for (i = 0; i < entries; i += entries_per_page)
		set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
}

static void xen_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
	int i;

	for (i = 0; i < entries; i += entries_per_page)
		set_aliased_prot(ldt + i, PAGE_KERNEL);
}

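/*
 * Tell the hypervisor where the current LDT lives.  The MMUEXT_SET_LDT
 * operation is queued as a multicall so it can be batched with any other
 * pending lazy-CPU updates.
 */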
static void xen_set_ldt(const void *addr, unsigned entries)
{
	struct mmuext_op *op;
	struct multicall_space mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_SET_LDT;
	op->arg1.linear_addr = (unsigned long)addr;
	op->arg2.nr_ents = entries;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_load_gdt(const struct desc_ptr *dtr)
{
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	unsigned long frames[pages];
	int f;

	/*
	 * A GDT can be up to 64k in size, which corresponds to 8192
	 * 8-byte entries, or 16 4k pages.
	 */

	BUG_ON(size > 65536);
	BUG_ON(va & ~PAGE_MASK);

	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
		int level;
		pte_t *ptep;
		unsigned long pfn, mfn;
		void *virt;

		/*
		 * The GDT is per-cpu and is in the percpu data area.
		 * That can be virtually mapped, so we need to do a
		 * page-walk to get the underlying MFN for the
		 * hypercall.  The page can also be in the kernel's
		 * linear range, so we need to RO that mapping too.
		 */
		ptep = lookup_address(va, &level);
		BUG_ON(ptep == NULL);

		pfn = pte_pfn(*ptep);
		mfn = pfn_to_mfn(pfn);
		virt = __va(PFN_PHYS(pfn));

		frames[f] = mfn;

		make_lowmem_page_readonly((void *)va);
		make_lowmem_page_readonly(virt);
	}

	if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
		BUG();
}

/*
 * load_gdt for early boot, when the gdt is only mapped once
 */
static __init void xen_load_gdt_boot(const struct desc_ptr *dtr)
{
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	unsigned long frames[pages];
	int f;

	/*
	 * A GDT can be up to 64k in size, which corresponds to 8192
	 * 8-byte entries, or 16 4k pages.
	 */

	BUG_ON(size > 65536);
	BUG_ON(va & ~PAGE_MASK);

	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
		pte_t pte;
		unsigned long pfn, mfn;

		pfn = virt_to_pfn(va);
		mfn = pfn_to_mfn(pfn);

		pte = pfn_pte(pfn, PAGE_KERNEL_RO);

		if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
			BUG();

		frames[f] = mfn;
	}

	if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
		BUG();
}

static void load_TLS_descriptor(struct thread_struct *t,
				unsigned int cpu, unsigned int i)
{
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
	xmaddr_t maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
	struct multicall_space mc = __xen_mc_entry(0);

	MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
}

static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
{
	/*
	 * XXX sleazy hack: If we're being called in a lazy-cpu zone
	 * and lazy gs handling is enabled, it means we're in a
	 * context switch, and %gs has just been saved.  This means we
	 * can zero it out to prevent faults on exit from the
	 * hypervisor if the next process has no %gs.  Either way, it
	 * has been saved, and the new value will get loaded properly.
	 * This will go away as soon as Xen has been modified to not
	 * save/restore %gs for normal hypercalls.
	 *
	 * On x86_64, this hack is not used for %gs, because gs points
	 * to KERNEL_GS_BASE (and uses it for PDA references), so we
	 * must not zero %gs on x86_64
	 *
	 * For x86_64, we need to zero %fs, otherwise we may get an
	 * exception between the new %fs descriptor being loaded and
	 * %fs being effectively cleared at __switch_to().
	 */
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
#ifdef CONFIG_X86_32
		lazy_load_gs(0);
#else
		loadsegment(fs, 0);
#endif
	}

	xen_mc_batch();

	load_TLS_descriptor(t, cpu, 0);
	load_TLS_descriptor(t, cpu, 1);
	load_TLS_descriptor(t, cpu, 2);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

#ifdef CONFIG_X86_64
static void xen_load_gs_index(unsigned int idx)
{
	if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, idx))
		BUG();
}
#endif

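/*
 * LDT pages were made read-only by xen_alloc_ldt(), so individual entries
 * have to be written through the hypervisor; flush any pending multicalls
 * first so the update is applied in order.
 */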
static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
				const void *ptr)
{
	xmaddr_t mach_lp = arbitrary_virt_to_machine(&dt[entrynum]);
	u64 entry = *(u64 *)ptr;

	preempt_disable();

	xen_mc_flush();
	if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry))
		BUG();

	preempt_enable();
}

static int cvt_gate_to_trap(int vector, const gate_desc *val,
			    struct trap_info *info)
{
	unsigned long addr;

	if (val->type != GATE_TRAP && val->type != GATE_INTERRUPT)
		return 0;

	info->vector = vector;

	addr = gate_offset(*val);
#ifdef CONFIG_X86_64
	/*
	 * Look for known traps using IST, and substitute them
	 * appropriately.  The debugger ones are the only ones we care
	 * about.  Xen will handle faults like double_fault and
	 * machine_check, so we should never see them.  Warn if
	 * there's an unexpected IST-using fault handler.
	 */
	if (addr == (unsigned long)debug)
		addr = (unsigned long)xen_debug;
	else if (addr == (unsigned long)int3)
		addr = (unsigned long)xen_int3;
	else if (addr == (unsigned long)stack_segment)
		addr = (unsigned long)xen_stack_segment;
	else if (addr == (unsigned long)double_fault ||
		 addr == (unsigned long)nmi) {
		/* Don't need to handle these */
		return 0;
#ifdef CONFIG_X86_MCE
	} else if (addr == (unsigned long)machine_check) {
		return 0;
#endif
	} else {
		/* Some other trap using IST? */
		if (WARN_ON(val->ist != 0))
			return 0;
	}
#endif	/* CONFIG_X86_64 */
	info->address = addr;

	info->cs = gate_segment(*val);
	info->flags = val->dpl;
	/* interrupt gates clear IF */
	if (val->type == GATE_INTERRUPT)
		info->flags |= 1 << 2;

	return 1;
}

/* Locations of each CPU's IDT */
static DEFINE_PER_CPU(struct desc_ptr, idt_desc);

/* Set an IDT entry.  If the entry is part of the current IDT, then
   also update Xen. */
static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
{
	unsigned long p = (unsigned long)&dt[entrynum];
	unsigned long start, end;

	preempt_disable();

	start = __this_cpu_read(idt_desc.address);
	end = start + __this_cpu_read(idt_desc.size) + 1;

	xen_mc_flush();

	native_write_idt_entry(dt, entrynum, g);

	if (p >= start && (p + 8) <= end) {
		struct trap_info info[2];

		info[1].address = 0;

		if (cvt_gate_to_trap(entrynum, g, &info[0]))
			if (HYPERVISOR_set_trap_table(info))
				BUG();
	}

	preempt_enable();
}

static void xen_convert_trap_info(const struct desc_ptr *desc,
				  struct trap_info *traps)
{
	unsigned in, out, count;

	count = (desc->size+1) / sizeof(gate_desc);
	BUG_ON(count > 256);

	for (in = out = 0; in < count; in++) {
		gate_desc *entry = (gate_desc*)(desc->address) + in;

		if (cvt_gate_to_trap(in, entry, &traps[out]))
			out++;
	}
	traps[out].address = 0;
}

void xen_copy_trap_info(struct trap_info *traps)
{
	const struct desc_ptr *desc = &__get_cpu_var(idt_desc);

	xen_convert_trap_info(desc, traps);
}

/* Load a new IDT into Xen.  In principle this can be per-CPU, so we
   hold a spinlock to protect the static traps[] array (static because
   it avoids allocation, and saves stack space). */
static void xen_load_idt(const struct desc_ptr *desc)
{
	static DEFINE_SPINLOCK(lock);
	static struct trap_info traps[257];

	spin_lock(&lock);

	__get_cpu_var(idt_desc) = *desc;

	xen_convert_trap_info(desc, traps);

	xen_mc_flush();
	if (HYPERVISOR_set_trap_table(traps))
		BUG();

	spin_unlock(&lock);
}

/* Write a GDT descriptor entry.  Ignore LDT descriptors, since
   they're handled differently. */
static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
				const void *desc, int type)
{
	preempt_disable();

	switch (type) {
	case DESC_LDT:
	case DESC_TSS:
		/* ignore */
		break;

	default: {
		xmaddr_t maddr = arbitrary_virt_to_machine(&dt[entry]);

		xen_mc_flush();
		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
			BUG();
	}

	}

	preempt_enable();
}

/*
 * Version of write_gdt_entry for use at early boot-time needed to
 * update an entry as simply as possible.
 */
static __init void xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
					    const void *desc, int type)
{
	switch (type) {
	case DESC_LDT:
	case DESC_TSS:
		/* ignore */
		break;

	default: {
		xmaddr_t maddr = virt_to_machine(&dt[entry]);

		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
			dt[entry] = *(struct desc_struct *)desc;
	}

	}
}

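/*
 * Point the hypervisor at the kernel stack to switch to on the next
 * transition from user mode; this is the PV equivalent of writing tss.sp0.
 */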
static void xen_load_sp0(struct tss_struct *tss,
			 struct thread_struct *thread)
{
	struct multicall_space mcs = xen_mc_entry(0);
	MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_set_iopl_mask(unsigned mask)
{
	struct physdev_set_iopl set_iopl;

	/* Force the change at ring 0. */
	set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
	HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
}

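/*
 * The traditional port-0x80 write used for I/O delay buys nothing in a
 * paravirtualized guest, so make it a no-op.
 */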
static void xen_io_delay(void)
{
}

#ifdef CONFIG_X86_LOCAL_APIC
static u32 xen_apic_read(u32 reg)
{
	return 0;
}

static void xen_apic_write(u32 reg, u32 val)
{
	/* Warn to see if there are any stray references */
	WARN_ON(1);
}

static u64 xen_apic_icr_read(void)
{
	return 0;
}

static void xen_apic_icr_write(u32 low, u32 id)
{
	/* Warn to see if there are any stray references */
	WARN_ON(1);
}

static void xen_apic_wait_icr_idle(void)
{
	return;
}

static u32 xen_safe_apic_wait_icr_idle(void)
{
	return 0;
}

static void set_xen_basic_apic_ops(void)
{
	apic->read = xen_apic_read;
	apic->write = xen_apic_write;
	apic->icr_read = xen_apic_icr_read;
	apic->icr_write = xen_apic_icr_write;
	apic->wait_icr_idle = xen_apic_wait_icr_idle;
	apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
}

#endif

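/*
 * CLTS is a privileged instruction, so clearing CR0.TS goes through the
 * fpu_taskswitch hypercall; it is issued as a lazy multicall so a context
 * switch pays for at most one trap into the hypervisor.
 */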
static void xen_clts(void)
{
	struct multicall_space mcs;

	mcs = xen_mc_entry(0);

	MULTI_fpu_taskswitch(mcs.mc, 0);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static DEFINE_PER_CPU(unsigned long, xen_cr0_value);

static unsigned long xen_read_cr0(void)
{
	unsigned long cr0 = percpu_read(xen_cr0_value);

	if (unlikely(cr0 == 0)) {
		cr0 = native_read_cr0();
		percpu_write(xen_cr0_value, cr0);
	}

	return cr0;
}

static void xen_write_cr0(unsigned long cr0)
{
	struct multicall_space mcs;

	percpu_write(xen_cr0_value, cr0);

	/* Only pay attention to cr0.TS; everything else is
	   ignored. */
	mcs = xen_mc_entry(0);

	MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

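/*
 * The hypervisor owns the pagetables, so a PV guest cannot turn on PSE or
 * global pages; mask those bits out before doing the native write.
 */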
static void xen_write_cr4(unsigned long cr4)
{
	cr4 &= ~X86_CR4_PGE;
	cr4 &= ~X86_CR4_PSE;

	native_write_cr4(cr4);
}

static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
{
	int ret;

	ret = 0;

	switch (msr) {
#ifdef CONFIG_X86_64
		unsigned which;
		u64 base;

	case MSR_FS_BASE:		which = SEGBASE_FS; goto set;
	case MSR_KERNEL_GS_BASE:	which = SEGBASE_GS_USER; goto set;
	case MSR_GS_BASE:		which = SEGBASE_GS_KERNEL; goto set;

	set:
		base = ((u64)high << 32) | low;
		if (HYPERVISOR_set_segment_base(which, base) != 0)
			ret = -EIO;
		break;
#endif

	case MSR_STAR:
	case MSR_CSTAR:
	case MSR_LSTAR:
	case MSR_SYSCALL_MASK:
	case MSR_IA32_SYSENTER_CS:
	case MSR_IA32_SYSENTER_ESP:
	case MSR_IA32_SYSENTER_EIP:
		/* Fast syscall setup is all done in hypercalls, so
		   these are all ignored.  Stub them out here to stop
		   Xen console noise. */
		break;

	case MSR_IA32_CR_PAT:
		if (smp_processor_id() == 0)
			xen_set_pat(((u64)high << 32) | low);
		break;

	default:
		ret = native_write_msr_safe(msr, low, high);
	}

	return ret;
}

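/*
 * Map the real shared_info page supplied by the hypervisor (via fixmap for
 * a non-autotranslated guest), replacing the dummy page used during very
 * early boot, and hook up everything that depends on it.
 */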
void xen_setup_shared_info(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		set_fixmap(FIX_PARAVIRT_BOOTMAP,
			   xen_start_info->shared_info);

		HYPERVISOR_shared_info =
			(struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
	} else
		HYPERVISOR_shared_info =
			(struct shared_info *)__va(xen_start_info->shared_info);

#ifndef CONFIG_SMP
	/* In UP this is as good a place as any to set up shared info */
	xen_setup_vcpu_info_placement();
#endif

	xen_setup_mfn_list_list();
}

/* This is called once we have the cpu_possible_map */
void xen_setup_vcpu_info_placement(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		xen_vcpu_setup(cpu);

	/* xen_vcpu_setup managed to place the vcpu_info within the
	   percpu area for all cpus, so make use of it */
	if (have_vcpu_info_placement) {
		pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
		pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
		pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
		pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
		pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
	}
}

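/*
 * Patch the irq-flag paravirt call sites inline with the "direct" variants
 * when vcpu_info placement is active; everything else falls back to the
 * generic paravirt patcher.
 */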
static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
			  unsigned long addr, unsigned len)
{
	char *start, *end, *reloc;
	unsigned ret;

	start = end = reloc = NULL;

#define SITE(op, x)							\
	case PARAVIRT_PATCH(op.x):					\
	if (have_vcpu_info_placement) {					\
		start = (char *)xen_##x##_direct;			\
		end = xen_##x##_direct_end;				\
		reloc = xen_##x##_direct_reloc;				\
	}								\
	goto patch_site

	switch (type) {
		SITE(pv_irq_ops, irq_enable);
		SITE(pv_irq_ops, irq_disable);
		SITE(pv_irq_ops, save_fl);
		SITE(pv_irq_ops, restore_fl);
#undef SITE

	patch_site:
		if (start == NULL || (end-start) > len)
			goto default_patch;

		ret = paravirt_patch_insns(insnbuf, len, start, end);

		/* Note: because reloc is assigned from something that
		   appears to be an array, gcc assumes it's non-null,
		   but doesn't know its relationship with start and
		   end. */
		if (reloc > start && reloc < end) {
			int reloc_off = reloc - start;
			long *relocp = (long *)(insnbuf + reloc_off);
			long delta = start - (char *)addr;

			*relocp += delta;
		}
		break;

	default_patch:
	default:
		ret = paravirt_patch_default(type, clobbers, insnbuf,
					     addr, len);
		break;
	}

	return ret;
}

static const struct pv_info xen_info __initdata = {
	.paravirt_enabled = 1,
	.shared_kernel_pmd = 0,

	.name = "Xen",
};

static const struct pv_init_ops xen_init_ops __initdata = {
	.patch = xen_patch,
};

static const struct pv_cpu_ops xen_cpu_ops __initdata = {
	.cpuid = xen_cpuid,

	.set_debugreg = xen_set_debugreg,
	.get_debugreg = xen_get_debugreg,

	.clts = xen_clts,

	.read_cr0 = xen_read_cr0,
	.write_cr0 = xen_write_cr0,

	.read_cr4 = native_read_cr4,
	.read_cr4_safe = native_read_cr4_safe,
	.write_cr4 = xen_write_cr4,

	.wbinvd = native_wbinvd,

	.read_msr = native_read_msr_safe,
	.write_msr = xen_write_msr_safe,
	.read_tsc = native_read_tsc,
	.read_pmc = native_read_pmc,

	.iret = xen_iret,
	.irq_enable_sysexit = xen_sysexit,
#ifdef CONFIG_X86_64
	.usergs_sysret32 = xen_sysret32,
	.usergs_sysret64 = xen_sysret64,
#endif

	.load_tr_desc = paravirt_nop,
	.set_ldt = xen_set_ldt,
	.load_gdt = xen_load_gdt,
	.load_idt = xen_load_idt,
	.load_tls = xen_load_tls,
#ifdef CONFIG_X86_64
	.load_gs_index = xen_load_gs_index,
#endif

	.alloc_ldt = xen_alloc_ldt,
	.free_ldt = xen_free_ldt,

	.store_gdt = native_store_gdt,
	.store_idt = native_store_idt,
	.store_tr = xen_store_tr,

	.write_ldt_entry = xen_write_ldt_entry,
	.write_gdt_entry = xen_write_gdt_entry,
	.write_idt_entry = xen_write_idt_entry,
	.load_sp0 = xen_load_sp0,

	.set_iopl_mask = xen_set_iopl_mask,
	.io_delay = xen_io_delay,

	/* Xen takes care of %gs when switching to usermode for us */
	.swapgs = paravirt_nop,

	.start_context_switch = paravirt_start_context_switch,
	.end_context_switch = xen_end_context_switch,
};

static const struct pv_apic_ops xen_apic_ops __initdata = {
#ifdef CONFIG_X86_LOCAL_APIC
	.startup_ipi_hook = paravirt_nop,
#endif
};

static void xen_reboot(int reason)
{
	struct sched_shutdown r = { .reason = reason };

	if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
		BUG();
}

static void xen_restart(char *msg)
{
	xen_reboot(SHUTDOWN_reboot);
}

static void xen_emergency_restart(void)
{
	xen_reboot(SHUTDOWN_reboot);
}

static void xen_machine_halt(void)
{
	xen_reboot(SHUTDOWN_poweroff);
}

static void xen_crash_shutdown(struct pt_regs *regs)
{
	xen_reboot(SHUTDOWN_crash);
}

static int
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	xen_reboot(SHUTDOWN_crash);
	return NOTIFY_DONE;
}

static struct notifier_block xen_panic_block = {
	.notifier_call = xen_panic_event,
};

int xen_panic_handler_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
	return 0;
}

static const struct machine_ops __initdata xen_machine_ops = {
	.restart = xen_restart,
	.halt = xen_machine_halt,
	.power_off = xen_machine_halt,
	.shutdown = xen_machine_halt,
	.crash_shutdown = xen_crash_shutdown,
	.emergency_restart = xen_emergency_restart,
};

/*
 * Set up the GDT and segment registers for -fstack-protector.  Until
 * we do this, we have to be careful not to call any stack-protected
 * function, which is most of the kernel.
 */
static void __init xen_setup_stackprotector(void)
{
	pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
	pv_cpu_ops.load_gdt = xen_load_gdt_boot;

	setup_stack_canary_segment(0);
	switch_to_new_gdt(0);

	pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
	pv_cpu_ops.load_gdt = xen_load_gdt;
}

/* First C function to be called on Xen boot */
asmlinkage void __init xen_start_kernel(void)
{
	struct physdev_set_iopl set_iopl;
	int rc;
	pgd_t *pgd;

	if (!xen_start_info)
		return;

	xen_domain_type = XEN_PV_DOMAIN;

	xen_setup_machphys_mapping();

	/* Install Xen paravirt ops */
	pv_info = xen_info;
	pv_init_ops = xen_init_ops;
	pv_cpu_ops = xen_cpu_ops;
	pv_apic_ops = xen_apic_ops;

	x86_init.resources.memory_setup = xen_memory_setup;
	x86_init.oem.arch_setup = xen_arch_setup;
	x86_init.oem.banner = xen_banner;

	xen_init_time_ops();

	/*
	 * Set up some pagetable state before starting to set any ptes.
	 */

	xen_init_mmu_ops();

	/* Prevent unwanted bits from being set in PTEs. */
	__supported_pte_mask &= ~_PAGE_GLOBAL;
	if (!xen_initial_domain())
		__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);

	__supported_pte_mask |= _PAGE_IOMAP;

	/*
	 * Prevent page tables from being allocated in highmem, even
	 * if CONFIG_HIGHPTE is enabled.
	 */
	__userpte_alloc_gfp &= ~__GFP_HIGHMEM;

	/* Work out if we support NX */
	x86_configure_nx();

	xen_setup_features();

	/* Get mfn list */
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		xen_build_dynamic_phys_to_machine();

	/*
	 * Set up kernel GDT and segment registers, mainly so that
	 * -fstack-protector code can be executed.
	 */
	xen_setup_stackprotector();

	xen_init_irq_ops();
	xen_init_cpuid_mask();

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * set up the basic apic ops.
	 */
	set_xen_basic_apic_ops();
#endif

	if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
		pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
		pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
	}

	machine_ops = xen_machine_ops;

	/*
	 * The only reliable way to retain the initial address of the
	 * percpu gdt_page is to remember it here, so we can go and
	 * mark it RW later, when the initial percpu area is freed.
	 */
	xen_initial_gdt = &per_cpu(gdt_page, 0);

	xen_smp_init();

#ifdef CONFIG_ACPI_NUMA
	/*
	 * The pages we get from Xen are not related to machine pages, so
	 * any NUMA information the kernel tries to get from ACPI will
	 * be meaningless.  Prevent it from trying.
	 */
	acpi_numa = -1;
#endif

	pgd = (pgd_t *)xen_start_info->pt_base;

	if (!xen_initial_domain())
		__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);

	__supported_pte_mask |= _PAGE_IOMAP;
	/* Don't do the full vcpu_info placement stuff until we have a
	   possible map and a non-dummy shared_info. */
	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];

	local_irq_disable();
	early_boot_irqs_disabled = true;

	memblock_init();

	xen_raw_console_write("mapping kernel into physical memory\n");
	pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
	xen_ident_map_ISA();

	/* Allocate and initialize top and mid mfn levels for p2m structure */
	xen_build_mfn_list_list();

	/* keep using Xen gdt for now; no urgent need to change it */

#ifdef CONFIG_X86_32
	pv_info.kernel_rpl = 1;
	if (xen_feature(XENFEAT_supervisor_mode_kernel))
		pv_info.kernel_rpl = 0;
#else
	pv_info.kernel_rpl = 0;
#endif
	/* set the limit of our address space */
	xen_reserve_top();

	/* We used to do this in xen_arch_setup, but that is too late on AMD
	 * where early_cpu_init (run before ->arch_setup()) calls early_amd_init
	 * which pokes 0xcf8 port.
	 */
	set_iopl.iopl = 1;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
	if (rc != 0)
		xen_raw_printk("physdev_op failed %d\n", rc);

#ifdef CONFIG_X86_32
	/* set up basic CPUID stuff */
	cpu_detect(&new_cpu_data);
	new_cpu_data.hard_math = 1;
	new_cpu_data.wp_works_ok = 1;
	new_cpu_data.x86_capability[0] = cpuid_edx(1);
#endif

	/* Poke various useful things into boot_params */
	boot_params.hdr.type_of_loader = (9 << 4) | 0;
	boot_params.hdr.ramdisk_image = xen_start_info->mod_start
		? __pa(xen_start_info->mod_start) : 0;
	boot_params.hdr.ramdisk_size = xen_start_info->mod_len;
	boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line);

	if (!xen_initial_domain()) {
		add_preferred_console("xenboot", 0, NULL);
		add_preferred_console("tty", 0, NULL);
		add_preferred_console("hvc", 0, NULL);
		if (pci_xen)
			x86_init.pci.arch_init = pci_xen_init;
	} else {
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}


	xen_raw_console_write("about to get started...\n");

	xen_setup_runstate_info(0);

	/* Start the world */
#ifdef CONFIG_X86_32
	i386_start_kernel();
#else
	x86_64_start_reservations((char *)__pa_symbol(&boot_params));
#endif
}

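/*
 * Read the Xen version from the hypervisor's CPUID leaves, point the
 * hypercall-page MSR at our hypercall_page, and switch pv_info and the
 * domain type over to HVM.
 */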
static int init_hvm_pv_info(int *major, int *minor)
{
	uint32_t eax, ebx, ecx, edx, pages, msr, base;
	u64 pfn;

	base = xen_cpuid_base();
	cpuid(base + 1, &eax, &ebx, &ecx, &edx);

	*major = eax >> 16;
	*minor = eax & 0xffff;
	printk(KERN_INFO "Xen version %d.%d.\n", *major, *minor);

	cpuid(base + 2, &pages, &msr, &ecx, &edx);

	pfn = __pa(hypercall_page);
	wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));

	xen_setup_features();

	pv_info = xen_info;
	pv_info.kernel_rpl = 0;

	xen_domain_type = XEN_HVM_DOMAIN;

	return 0;
}

void xen_hvm_init_shared_info(void)
{
	int cpu;
	struct xen_add_to_physmap xatp;
	static struct shared_info *shared_info_page = 0;

	if (!shared_info_page)
		shared_info_page = (struct shared_info *)
			extend_brk(PAGE_SIZE, PAGE_SIZE);
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		BUG();

	HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;

	/* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
	 * page, we use it in the event channel upcall and in some pvclock
	 * related functions. We don't need the vcpu_info placement
	 * optimizations because we don't use any pv_mmu or pv_irq op on
	 * HVM.
	 * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is
	 * online but xen_hvm_init_shared_info is run at resume time too and
	 * in that case multiple vcpus might be online. */
	for_each_online_cpu(cpu) {
		per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
	}
}

#ifdef CONFIG_XEN_PVHVM
static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;
	switch (action) {
	case CPU_UP_PREPARE:
		per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata xen_hvm_cpu_notifier = {
	.notifier_call = xen_hvm_cpu_notify,
};

static void __init xen_hvm_guest_init(void)
{
	int r;
	int major, minor;

	r = init_hvm_pv_info(&major, &minor);
	if (r < 0)
		return;

	xen_hvm_init_shared_info();

	if (xen_feature(XENFEAT_hvm_callback_vector))
		xen_have_vector_callback = 1;
	register_cpu_notifier(&xen_hvm_cpu_notifier);
	xen_unplug_emulated_devices();
	have_vcpu_info_placement = 0;
	x86_init.irqs.intr_init = xen_init_IRQ;
	xen_hvm_init_time_ops();
	xen_hvm_init_mmu_ops();
}

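/*
 * Detect callback for the x86 hypervisor framework: report a Xen platform
 * only for HVM guests with the Xen CPUID leaves present (PV guests are set
 * up much earlier, via xen_start_kernel).
 */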
static bool __init xen_hvm_platform(void)
{
	if (xen_pv_domain())
		return false;

	if (!xen_cpuid_base())
		return false;

	return true;
}

bool xen_hvm_need_lapic(void)
{
	if (xen_pv_domain())
		return false;
	if (!xen_hvm_domain())
		return false;
	if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
		return false;
	return true;
}
EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);

const __refconst struct hypervisor_x86 x86_hyper_xen_hvm = {
	.name			= "Xen HVM",
	.detect			= xen_hvm_platform,
	.init_platform		= xen_hvm_guest_init,
};
EXPORT_SYMBOL(x86_hyper_xen_hvm);
#endif