xen64: add 64-bit assembler

Split xen-asm into 32- and 64-bit files, and implement the 64-bit variants.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Stephen Tweedie <sct@redhat.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

parent 555cf2b580
commit cdacc1278b
arch/x86/xen/Makefile
@@ -1,4 +1,4 @@
 obj-y		:= enlighten.o setup.o multicalls.o mmu.o \
-			time.o xen-asm.o grant-table.o suspend.o
+			time.o xen-asm_$(BITS).o grant-table.o suspend.o
 
 obj-$(CONFIG_SMP)	+= smp.o
arch/x86/xen/xen-asm_64.S (new file, 141 lines)
@@ -0,0 +1,141 @@
/*
	Asm versions of Xen pv-ops, suitable for either direct use or
	inlining.  The inline versions are the same as the direct-use
	versions, with the pre- and post-amble chopped off.

	This code is encoded for size rather than absolute efficiency,
	with a view to being able to inline as much as possible.

	We only bother with direct forms (ie, vcpu in pda) of the
	operations here; the indirect forms are better handled in C,
	since they're generally too large to inline anyway.
 */

#include <linux/linkage.h>

#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>

#include <xen/interface/xen.h>

#define RELOC(x, v)	.globl x##_reloc; x##_reloc=v
#define ENDPATCH(x)	.globl x##_end; x##_end=.

/* Pseudo-flag used for virtual NMI, which we don't implement yet */
#define XEN_EFLAGS_NMI	0x80000000
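/* RELOC(x, v) publishes x_reloc, the address of any instruction
   operand inside the patchable sequence that must be adjusted when
   the code is copied to a patch site (v == 0 means there is none);
   ENDPATCH(x) publishes x_end, so the patcher knows where the
   inlinable body stops (the trailing ret is deliberately outside
   it). */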
#if 0
#include <asm/percpu.h>

/*
	Enable events.  This clears the event mask and tests the
	pending event status with a single 'and' operation.  If there
	are pending events, then enter the hypervisor to get them
	handled.
 */
ENTRY(xen_irq_enable_direct)
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)

	/* Preempt here doesn't matter because that will deal with
	   any pending interrupts.  The pending check may end up being
	   run on the wrong CPU, but that doesn't hurt. */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
	jz 1f
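	/* Label 2 marks the call so RELOC(..., 2b+1) can publish the
	   address of its rel32 operand for the patcher to fix up. */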
2:	call check_events
1:
ENDPATCH(xen_irq_enable_direct)
	ret
	ENDPROC(xen_irq_enable_direct)
	RELOC(xen_irq_enable_direct, 2b+1)
/*
	Disabling events is simply a matter of making the event mask
	non-zero.
 */
ENTRY(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
ENDPATCH(xen_irq_disable_direct)
	ret
	ENDPROC(xen_irq_disable_direct)
	RELOC(xen_irq_disable_direct, 0)
/*
	(xen_)save_fl is used to get the current interrupt enable status.
	Callers expect the status to be in X86_EFLAGS_IF, and other bits
	may be set in the return value.  We take advantage of this by
	making sure that X86_EFLAGS_IF has the right value (and other
	bits in that byte are 0), but other bits in the return value are
	undefined.  We need to toggle the state of the bit, because
	Xen and x86 use opposite senses (mask vs enable).
 */
ENTRY(xen_save_fl_direct)
	testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
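	/* setz gives %ah = 1 iff the mask byte was zero (events
	   enabled); doubling that leaves 2 in %ah, ie bit 9 of %eax,
	   which is exactly X86_EFLAGS_IF (0x200). */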
	setz %ah
	addb %ah,%ah
ENDPATCH(xen_save_fl_direct)
	ret
	ENDPROC(xen_save_fl_direct)
	RELOC(xen_save_fl_direct, 0)
/*
	In principle the caller should be passing us the value returned
	by xen_save_fl_direct, but for robustness' sake we test only
	the X86_EFLAGS_IF flag rather than the whole byte.  After
	setting the interrupt mask state, it checks for unmasked
	pending events and enters the hypervisor to get them delivered
	if so.
 */
ENTRY(xen_restore_fl_direct)
	testb $X86_EFLAGS_IF>>8, %ah
	setz PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
	/* Preempt here doesn't matter because that will deal with
	   any pending interrupts.  The pending check may end up being
	   run on the wrong CPU, but that doesn't hurt. */

	/* check for unmasked and pending */
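	/* vcpu_info keeps the pending byte at offset 0 and the mask
	   byte at offset 1, so a single word compare against 0x0001
	   tests "pending set and not masked" in one go. */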
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
	jz 1f
2:	call check_events
1:
ENDPATCH(xen_restore_fl_direct)
	ret
	ENDPROC(xen_restore_fl_direct)
	RELOC(xen_restore_fl_direct, 2b+1)
/*
	Force an event check by making a hypercall,
	but preserve regs before making the call.
 */
check_events:
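	/* force_evtchn_callback is C code, so preserve everything the
	   SysV AMD64 ABI lets a callee clobber: %rax, %rcx, %rdx,
	   %rsi, %rdi and %r8-%r11. */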
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
	ret
#endif
ENTRY(xen_iret)
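	/* The 64-bit iret hypercall expects one extra word on the
	   stack beneath the usual iret frame: a flags word, zero here
	   since no VGCF_* flags apply.  Each hypercall occupies a
	   32-byte slot in the hypercall page, so the jmp below
	   tail-calls the iret stub directly. */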
	pushq $0
	jmp hypercall_page + __HYPERVISOR_iret * 32

ENTRY(xen_sysexit)
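	/* sysexit isn't implemented for 64-bit yet, so trap hard if
	   we ever get here (ud2a raises invalid opcode). */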
	ud2a
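
The RELOC()/ENDPATCH() symbols above exist so the paravirt patcher can
splice these sequences straight into the kernel's pv-op call sites.
Below is a minimal sketch of such a consumer, in the spirit of the
patching done in arch/x86/xen/enlighten.c; the names patch_site,
xen_inline_irq_disable and insnbuf are illustrative, not the kernel's:

	#include <stdint.h>
	#include <string.h>
	#include <stddef.h>

	/* Symbols emitted by ENTRY()/ENDPATCH()/RELOC() for one pv-op.
	   Note RELOC sets the symbol's *value*, so the pointer is
	   either NULL or the address of the rel32 operand. */
	extern char xen_irq_disable_direct[];
	extern char xen_irq_disable_direct_end[];
	extern char xen_irq_disable_direct_reloc[];

	/* Copy the patchable body of a pv-op over a call site. */
	static size_t patch_site(void *insnbuf, size_t len,
				 char *start, char *end, char *reloc)
	{
		size_t insn_len = end - start;

		if (insn_len > len)
			return 0;	/* too big to inline; keep the call */

		/* ENDPATCH already excluded the post-amble (the ret). */
		memcpy(insnbuf, start, insn_len);

		/*
		 * RELOC(x, 2b+1) recorded the address of a rel32 call
		 * operand; once the code has moved, that displacement is
		 * relative to the wrong place, so shift it by the
		 * distance moved.  RELOC(x, 0) means nothing to fix up.
		 */
		if (reloc) {
			int32_t *relocp = (int32_t *)((char *)insnbuf +
						      (reloc - start));
			*relocp += (int32_t)(start - (char *)insnbuf);
		}

		return insn_len;
	}

	/* Example: inline the irq-disable sequence at `site`. */
	size_t xen_inline_irq_disable(void *site, size_t len)
	{
		return patch_site(site, len,
				  xen_irq_disable_direct,
				  xen_irq_disable_direct_end,
				  xen_irq_disable_direct_reloc);
	}

If the sequence fits, the call site runs the pv-op with no call/ret
overhead; otherwise the indirect call through the pv-ops structure is
left in place, which is why these direct forms are kept small.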