x86: Share definition of __is_canonical_address()
Reduce code duplication by moving canonical address code to a common
header file.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220131072453.2839535-3-adrian.hunter@intel.com
This commit is contained in:
parent c243cecb58
commit 1fb85d06ad
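Background for the change below: on x86-64 an address is canonical when its upper bits are copies of the highest implemented address bit (bit 47 with 4-level paging, bit 56 with 5-level). Every one of the duplicated helpers this patch consolidates computes the canonical form with the same sign-extending shift pair. A minimal userspace sketch of that computation, not part of the patch; the test values assume 48 address bits:

#include <stdint.h>
#include <stdio.h>

/* Shift the unimplemented upper bits out, then shift back
 * arithmetically so bit (vaddr_bits - 1) is replicated upward. */
static uint64_t canonical(uint64_t vaddr, uint8_t vaddr_bits)
{
	return (uint64_t)(((int64_t)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits));
}

int main(void)
{
	/* Top of the lower (user) half: already canonical. */
	printf("%d\n", canonical(0x00007fffffffffffULL, 48) == 0x00007fffffffffffULL); /* 1 */
	/* Bottom of the upper (kernel) half: already canonical. */
	printf("%d\n", canonical(0xffff800000000000ULL, 48) == 0xffff800000000000ULL); /* 1 */
	/* First address inside the non-canonical hole. */
	printf("%d\n", canonical(0x0000800000000000ULL, 48) == 0x0000800000000000ULL); /* 0 */
	return 0;
}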
arch/x86/events/intel/pt.c

@@ -1350,20 +1350,10 @@ static void pt_addr_filters_fini(struct perf_event *event)
 }
 
 #ifdef CONFIG_X86_64
-static u64 canonical_address(u64 vaddr, u8 vaddr_bits)
-{
-	return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
-}
-
-static u64 is_canonical_address(u64 vaddr, u8 vaddr_bits)
-{
-	return canonical_address(vaddr, vaddr_bits) == vaddr;
-}
-
 /* Clamp to a canonical address greater-than-or-equal-to the address given */
 static u64 clamp_to_ge_canonical_addr(u64 vaddr, u8 vaddr_bits)
 {
-	return is_canonical_address(vaddr, vaddr_bits) ?
+	return __is_canonical_address(vaddr, vaddr_bits) ?
 	       vaddr :
 	       -BIT_ULL(vaddr_bits - 1);
 }
@@ -1371,7 +1361,7 @@ static u64 clamp_to_ge_canonical_addr(u64 vaddr, u8 vaddr_bits)
 /* Clamp to a canonical address less-than-or-equal-to the address given */
 static u64 clamp_to_le_canonical_addr(u64 vaddr, u8 vaddr_bits)
 {
-	return is_canonical_address(vaddr, vaddr_bits) ?
+	return __is_canonical_address(vaddr, vaddr_bits) ?
 	       vaddr :
 	       BIT_ULL(vaddr_bits - 1) - 1;
 }
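The two clamp helpers map an out-of-range filter bound to the nearest canonical address on the appropriate side of the non-canonical hole: -BIT_ULL(vaddr_bits - 1) is the first canonical address above the hole, and BIT_ULL(vaddr_bits - 1) - 1 the last one below it. A userspace sketch of that behaviour, with a mocked BIT_ULL and 48 address bits assumed:

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

static int is_canonical(uint64_t vaddr, uint8_t bits)
{
	return (uint64_t)(((int64_t)vaddr << (64 - bits)) >> (64 - bits)) == vaddr;
}

/* Non-canonical input clamps up to the first canonical address above
 * the hole: -BIT_ULL(47) == 0xffff800000000000 for 48 bits. */
static uint64_t clamp_ge(uint64_t vaddr, uint8_t bits)
{
	return is_canonical(vaddr, bits) ? vaddr : -BIT_ULL(bits - 1);
}

/* ...or down to the last canonical address below the hole:
 * BIT_ULL(47) - 1 == 0x00007fffffffffff for 48 bits. */
static uint64_t clamp_le(uint64_t vaddr, uint8_t bits)
{
	return is_canonical(vaddr, bits) ? vaddr : BIT_ULL(bits - 1) - 1;
}

int main(void)
{
	uint64_t hole = 0x0000800000000000ULL;	/* inside the non-canonical hole */

	printf("ge: %#llx\n", (unsigned long long)clamp_ge(hole, 48)); /* 0xffff800000000000 */
	printf("le: %#llx\n", (unsigned long long)clamp_le(hole, 48)); /* 0x7fffffffffff */
	return 0;
}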
arch/x86/include/asm/page.h

@@ -71,6 +71,16 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
 extern bool __virt_addr_valid(unsigned long kaddr);
 #define virt_addr_valid(kaddr)	__virt_addr_valid((unsigned long) (kaddr))
 
+static __always_inline u64 __canonical_address(u64 vaddr, u8 vaddr_bits)
+{
+	return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
+}
+
+static __always_inline u64 __is_canonical_address(u64 vaddr, u8 vaddr_bits)
+{
+	return __canonical_address(vaddr, vaddr_bits) == vaddr;
+}
+
 #endif	/* __ASSEMBLY__ */
 
 #include <asm-generic/memory_model.h>
arch/x86/kvm/emulate.c

@@ -665,7 +665,7 @@ static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
 static inline bool emul_is_noncanonical_address(u64 la,
 						struct x86_emulate_ctxt *ctxt)
 {
-	return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
+	return !__is_canonical_address(la, ctxt_virt_addr_bits(ctxt));
 }
 
 /*
@@ -715,7 +715,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 	case X86EMUL_MODE_PROT64:
 		*linear = la;
 		va_bits = ctxt_virt_addr_bits(ctxt);
-		if (get_canonical(la, va_bits) != la)
+		if (!__is_canonical_address(la, va_bits))
 			goto bad;
 
 		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
arch/x86/kvm/x86.c

@@ -1735,7 +1735,7 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
 		 * value, and that something deterministic happens if the guest
 		 * invokes 64-bit SYSENTER.
 		 */
-		data = get_canonical(data, vcpu_virt_addr_bits(vcpu));
+		data = __canonical_address(data, vcpu_virt_addr_bits(vcpu));
 		break;
 	case MSR_TSC_AUX:
 		if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
arch/x86/kvm/x86.h

@@ -211,14 +211,9 @@ static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
 	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
 }
 
-static inline u64 get_canonical(u64 la, u8 vaddr_bits)
-{
-	return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
-}
-
 static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
 {
-	return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
+	return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
 }
 
 static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
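Note that vcpu_virt_addr_bits() selects 57 or 48 address bits depending on CR4.LA57, and the shared helper covers both widths through its vaddr_bits parameter. A small illustrative check (userspace sketch, not kernel code):

#include <stdint.h>
#include <stdio.h>

static int is_canonical(uint64_t vaddr, uint8_t vaddr_bits)
{
	return (uint64_t)(((int64_t)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits)) == vaddr;
}

int main(void)
{
	/* Bits 55:48 set: outside the 48-bit canonical range, but a
	 * valid low-half address when 57 bits (LA57) are in use. */
	uint64_t va = 0x00ff000000000000ULL;

	printf("48 bits: %d\n", is_canonical(va, 48));	/* 0 */
	printf("57 bits: %d\n", is_canonical(va, 57));	/* 1 */
	return 0;
}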
arch/x86/mm/maccess.c

@@ -4,11 +4,6 @@
 #include <linux/kernel.h>
 
 #ifdef CONFIG_X86_64
-static __always_inline u64 canonical_address(u64 vaddr, u8 vaddr_bits)
-{
-	return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
-}
-
 bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
 {
 	unsigned long vaddr = (unsigned long)unsafe_src;
@@ -19,7 +14,7 @@ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
 	 * we also need to include the userspace guard page.
 	 */
 	return vaddr >= TASK_SIZE_MAX + PAGE_SIZE &&
-	       canonical_address(vaddr, boot_cpu_data.x86_virt_bits) == vaddr;
+	       __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits);
 }
 #else
 bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
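The predicate above rejects user addresses, the guard page directly above TASK_SIZE_MAX, and the non-canonical hole in a single pass. A hedged userspace model of that check; the TASK_SIZE_MAX and PAGE_SIZE values are illustrative and assume 4-level paging:

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only: on x86-64 with 4-level paging,
 * TASK_SIZE_MAX is one page below 2^47. */
#define PAGE_SIZE     0x1000ULL
#define TASK_SIZE_MAX ((1ULL << 47) - PAGE_SIZE)

static int is_canonical(uint64_t vaddr, uint8_t bits)
{
	return (uint64_t)(((int64_t)vaddr << (64 - bits)) >> (64 - bits)) == vaddr;
}

static int nofault_read_allowed(uint64_t vaddr)
{
	/* Reject userspace, the guard page above it, and the
	 * non-canonical hole; only canonical kernel addresses pass. */
	return vaddr >= TASK_SIZE_MAX + PAGE_SIZE && is_canonical(vaddr, 48);
}

int main(void)
{
	printf("%d\n", nofault_read_allowed(0x00007f0000000000ULL)); /* 0: user */
	printf("%d\n", nofault_read_allowed(TASK_SIZE_MAX));         /* 0: guard page */
	printf("%d\n", nofault_read_allowed(0x0000900000000000ULL)); /* 0: hole */
	printf("%d\n", nofault_read_allowed(0xffff880000000000ULL)); /* 1: kernel */
	return 0;
}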