mirror of https://github.com/torvalds/linux.git (synced 2024-12-03 17:41:22 +00:00)
5c83511bdb
Instead of using six globally visible paravirt ops structures, combine them into a single structure, keeping the original structures as sub-structures. This avoids the need to assemble struct paravirt_patch_template at runtime on the stack each time apply_paravirt() is called (i.e. when loading a module).

[ tglx: Made the struct and the initializer tabular for readability's sake ]

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: xen-devel@lists.xenproject.org
Cc: virtualization@lists.linux-foundation.org
Cc: akataria@vmware.com
Cc: rusty@rustcorp.com.au
Cc: boris.ostrovsky@oracle.com
Cc: hpa@zytor.com
Link: https://lkml.kernel.org/r/20180828074026.820-9-jgross@suse.com
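A condensed sketch of the combined structure the commit describes, assuming the sub-structure names used in this commit series (the authoritative definitions live in arch/x86/include/asm/paravirt_types.h; the pv_lock_ops members are trimmed here to the two this file actually touches):

/*
 * Condensed sketch, not the full kernel definitions: the six formerly
 * separate top-level ops structures become sub-structures of a single
 * paravirt_patch_template, so apply_paravirt() can index into one
 * statically allocated object instead of assembling it on the stack.
 */
struct paravirt_callee_save {
	void *func;
};

struct pv_lock_ops {
	struct paravirt_callee_save queued_spin_unlock;
	struct paravirt_callee_save vcpu_is_preempted;
	/* ... remaining lock ops omitted in this sketch ... */
};

struct paravirt_patch_template {
	struct pv_init_ops	init;
	struct pv_time_ops	time;
	struct pv_cpu_ops	cpu;
	struct pv_irq_ops	irq;
	struct pv_mmu_ops	mmu;
	struct pv_lock_ops	lock;
};

extern struct paravirt_patch_template pv_ops;

This is why the file below reads pv_ops.lock.queued_spin_unlock.func rather than going through a standalone pv_lock_ops instance.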
35 lines
830 B
C
// SPDX-License-Identifier: GPL-2.0
/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/jump_label.h>

#include <asm/paravirt.h>

__visible void __native_queued_spin_unlock(struct qspinlock *lock)
{
	native_queued_spin_unlock(lock);
}
PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);

bool pv_is_native_spin_unlock(void)
{
	return pv_ops.lock.queued_spin_unlock.func ==
		__raw_callee_save___native_queued_spin_unlock;
}

__visible bool __native_vcpu_is_preempted(long cpu)
{
	return false;
}
PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);

bool pv_is_native_vcpu_is_preempted(void)
{
	return pv_ops.lock.vcpu_is_preempted.func ==
		__raw_callee_save___native_vcpu_is_preempted;
}
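For context, a hedged sketch of how predicates like pv_is_native_spin_unlock() are consumed: if the unlock op still points at the native thunk, a call site can be patched with the open-coded native store instead of an indirect call through pv_ops. The function below is illustrative only (example_patch_unlock, insn_buff, and len are made up for this sketch; the real logic lives in the x86 paravirt patching code):

/*
 * Illustrative only, not the kernel's actual patching code: shows the
 * intent behind pv_is_native_spin_unlock().
 */
#include <linux/string.h>	/* memcpy(); headers assumed for the sketch */

static unsigned int example_patch_unlock(void *insn_buff, unsigned int len)
{
	/* "movb $0, (%rdi)" - the native queued unlock, open-coded. */
	static const unsigned char native_unlock[] = { 0xc6, 0x07, 0x00 };

	if (pv_is_native_spin_unlock() && len >= sizeof(native_unlock)) {
		memcpy(insn_buff, native_unlock, sizeof(native_unlock));
		return sizeof(native_unlock);
	}

	/* Otherwise leave the indirect call through pv_ops.lock in place. */
	return 0;
}

For reference, PV_CALLEE_SAVE_REGS_THUNK() in the file above generates the __raw_callee_save___* wrapper these predicates compare against: a thunk that saves and restores the caller-clobbered registers around the call, so the op can be invoked from call sites that do not follow the regular C calling convention.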