x86/xen: Make save_fl() noinstr
vmlinux.o: warning: objtool: pv_ops[30]: native_save_fl
vmlinux.o: warning: objtool: pv_ops[30]: __raw_callee_save_xen_save_fl
vmlinux.o: warning: objtool: pv_ops[30]: xen_save_fl_direct
vmlinux.o: warning: objtool: lockdep_hardirqs_off()+0x73: call to pv_ops[30]() leaves .noinstr.text section

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Juergen Gross <jgross@suse.com>
Link: https://lore.kernel.org/r/20210624095148.749712274@infradead.org
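Background note (illustration, not part of the original commit message): objtool's vmlinux validation requires that code placed in .noinstr.text only reaches other noinstr code. With CONFIG_PARAVIRT, arch_local_save_flags() goes through pv_ops.irq.save_fl, and before this patch all three possible targets (native_save_fl, __raw_callee_save_xen_save_fl, xen_save_fl_direct) lived in plain .text, which is what the warnings above report. A minimal sketch of the rule being enforced; the function name below is hypothetical:

/* Hypothetical illustration of the objtool rule, not code from this patch. */
noinstr void my_entry_helper(void)		/* emitted into .noinstr.text */
{
	unsigned long flags;

	/*
	 * Under CONFIG_PARAVIRT this is a pv_ops.irq.save_fl call; if the
	 * target function sits in plain .text, objtool reports that the
	 * call "leaves .noinstr.text section".
	 */
	flags = arch_local_save_flags();
	(void)flags;
}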
@@ -653,10 +653,10 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
  * functions.
  */
 #define PV_THUNK_NAME(func) "__raw_callee_save_" #func
-#define PV_CALLEE_SAVE_REGS_THUNK(func) \
+#define __PV_CALLEE_SAVE_REGS_THUNK(func, section) \
 	extern typeof(func) __raw_callee_save_##func; \
 	\
-	asm(".pushsection .text;" \
+	asm(".pushsection " section ", \"ax\";" \
 	    ".globl " PV_THUNK_NAME(func) ";" \
 	    ".type " PV_THUNK_NAME(func) ", @function;" \
 	    PV_THUNK_NAME(func) ":" \
@@ -669,6 +669,9 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
 	    ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";" \
 	    ".popsection")
 
+#define PV_CALLEE_SAVE_REGS_THUNK(func) \
+	__PV_CALLEE_SAVE_REGS_THUNK(func, ".text")
+
 /* Get a reference to a callee-save function */
 #define PV_CALLEE_SAVE(func) \
 	((struct paravirt_callee_save) { __raw_callee_save_##func })
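For reference (an illustration, not part of the diff): the new section parameter lets a caller emit the callee-save thunk into .noinstr.text instead of .text. Assuming 64-bit register conventions and with FRAME_BEGIN/FRAME_END elided, __PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl, ".noinstr.text") expands to roughly:

/* Rough sketch of the macro expansion; exact register list and frame
 * handling come from PV_SAVE_ALL_CALLER_REGS/PV_RESTORE_ALL_CALLER_REGS. */
extern typeof(xen_save_fl) __raw_callee_save_xen_save_fl;

asm(".pushsection .noinstr.text, \"ax\";"
    ".globl __raw_callee_save_xen_save_fl;"
    ".type __raw_callee_save_xen_save_fl, @function;"
    "__raw_callee_save_xen_save_fl:"
    /* preserve caller-saved registers except %rax (the return value) */
    "push %rcx; push %rdx; push %rsi; push %rdi;"
    "push %r8; push %r9; push %r10; push %r11;"
    "call xen_save_fl;"
    "pop %r11; pop %r10; pop %r9; pop %r8;"
    "pop %rdi; pop %rsi; pop %rdx; pop %rcx;"
    "ret;"
    ".size __raw_callee_save_xen_save_fl, .-__raw_callee_save_xen_save_fl;"
    ".popsection");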
@@ -7,9 +7,11 @@
 /*
  * unsigned long native_save_fl(void)
  */
+.pushsection .noinstr.text, "ax"
 SYM_FUNC_START(native_save_fl)
 	pushf
 	pop %_ASM_AX
 	ret
 SYM_FUNC_END(native_save_fl)
+.popsection
 EXPORT_SYMBOL(native_save_fl)
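Side note (illustration only): the out-of-line native_save_fl above exists so the paravirt machinery has a real symbol for irq.save_fl to point at; moving it into .noinstr.text makes it safe to reach from noinstr callers. The operation itself is the familiar pushf/pop read of EFLAGS, roughly equivalent to an inline C helper along these lines (the name here is hypothetical):

static __always_inline unsigned long native_save_fl_equiv(void)
{
	unsigned long flags;

	/* read EFLAGS into a general-purpose register via the stack */
	asm volatile("pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");
	return flags;
}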
@@ -24,7 +24,7 @@ void xen_force_evtchn_callback(void)
 	(void)HYPERVISOR_xen_version(0, NULL);
 }
 
-asmlinkage __visible unsigned long xen_save_fl(void)
+asmlinkage __visible noinstr unsigned long xen_save_fl(void)
 {
 	struct vcpu_info *vcpu;
 	unsigned long flags;
@@ -40,7 +40,7 @@ asmlinkage __visible unsigned long xen_save_fl(void)
 	 */
 	return (-flags) & X86_EFLAGS_IF;
 }
-PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
+__PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl, ".noinstr.text");
 
 asmlinkage __visible void xen_irq_disable(void)
 {
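For context (a sketch from memory of the Xen irq code, not part of the diff; field names may differ by kernel version): the thunk generated above is what actually lands in pv_ops, wired up through PV_CALLEE_SAVE(), roughly like:

/* Sketch of how the callee-save thunk is consumed; not taken from the patch. */
static const struct pv_irq_ops xen_irq_ops __initconst = {
	/* PV_CALLEE_SAVE() references __raw_callee_save_xen_save_fl */
	.save_fl	= PV_CALLEE_SAVE(xen_save_fl),
	.irq_disable	= PV_CALLEE_SAVE(xen_irq_disable),
	.irq_enable	= PV_CALLEE_SAVE(xen_irq_enable),
	/* ... */
};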
@@ -57,22 +57,6 @@ SYM_FUNC_START(xen_irq_disable_direct)
 	ret
 SYM_FUNC_END(xen_irq_disable_direct)
 
-/*
- * (xen_)save_fl is used to get the current interrupt enable status.
- * Callers expect the status to be in X86_EFLAGS_IF, and other bits
- * may be set in the return value. We take advantage of this by
- * making sure that X86_EFLAGS_IF has the right value (and other bits
- * in that byte are 0), but other bits in the return value are
- * undefined. We need to toggle the state of the bit, because Xen and
- * x86 use opposite senses (mask vs enable).
- */
-SYM_FUNC_START(xen_save_fl_direct)
-	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
-	setz %ah
-	addb %ah, %ah
-	ret
-SYM_FUNC_END(xen_save_fl_direct)
-
 /*
  * Force an event check by making a hypercall, but preserve regs
  * before making the call.
@@ -103,6 +87,22 @@ SYM_FUNC_START(check_events)
 SYM_FUNC_END(check_events)
 
 .pushsection .noinstr.text, "ax"
+/*
+ * (xen_)save_fl is used to get the current interrupt enable status.
+ * Callers expect the status to be in X86_EFLAGS_IF, and other bits
+ * may be set in the return value. We take advantage of this by
+ * making sure that X86_EFLAGS_IF has the right value (and other bits
+ * in that byte are 0), but other bits in the return value are
+ * undefined. We need to toggle the state of the bit, because Xen and
+ * x86 use opposite senses (mask vs enable).
+ */
+SYM_FUNC_START(xen_save_fl_direct)
+	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
+	setz %ah
+	addb %ah, %ah
+	ret
+SYM_FUNC_END(xen_save_fl_direct)
+
 SYM_FUNC_START(xen_read_cr2)
 	FRAME_BEGIN
 	_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
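Closing note (illustration only): xen_save_fl_direct, now moved into the existing .noinstr.text pushsection, reports the interrupt-enable state from the per-CPU vcpu_info mask byte. A hypothetical C rendering of the computation, with the result expressed in the X86_EFLAGS_IF position the callers expect:

/* Hypothetical C equivalent of xen_save_fl_direct; not code from the patch. */
static unsigned long xen_save_fl_direct_equiv(u8 evtchn_upcall_mask)
{
	/*
	 * In the asm, testb/setz %ah/addb %ah,%ah leaves 0x02 in bits 8..15
	 * when the mask byte is zero (events enabled), i.e. bit 9, which is
	 * X86_EFLAGS_IF. Bits outside that byte stay undefined in the real
	 * asm, as the comment in the diff notes.
	 */
	return evtchn_upcall_mask ? 0 : X86_EFLAGS_IF;
}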