tools arch: Update arch/x86/lib/memcpy_64.S copy used in 'perf bench mem memcpy'
To cope with the changes in:

  12c89130a5 ("x86/asm/memcpy_mcsafe: Add write-protection-fault handling")
  60622d6822 ("x86/asm/memcpy_mcsafe: Return bytes remaining")
  bd131544aa ("x86/asm/memcpy_mcsafe: Add labels for __memcpy_mcsafe() write fault handling")
  da7bc9c57e ("x86/asm/memcpy_mcsafe: Remove loop unrolling")

This needed introducing a file with a copy of the mcsafe_handle_tail()
function, that is used in the new memcpy_64.S file, as well as a dummy
mcsafe_test.h header.

Testing it:

  $ nm ~/bin/perf | grep mcsafe
  0000000000484130 T mcsafe_handle_tail
  0000000000484300 T __memcpy_mcsafe
  $
  $ perf bench mem memcpy
  # Running 'mem/memcpy' benchmark:
  # function 'default' (Default memcpy() provided by glibc)
  # Copying 1MB bytes ...
       44.389205 GB/sec
  # function 'x86-64-unrolled' (unrolled memcpy() in arch/x86/lib/memcpy_64.S)
  # Copying 1MB bytes ...
       22.710756 GB/sec
  # function 'x86-64-movsq' (movsq-based memcpy() in arch/x86/lib/memcpy_64.S)
  # Copying 1MB bytes ...
       42.459239 GB/sec
  # function 'x86-64-movsb' (movsb-based memcpy() in arch/x86/lib/memcpy_64.S)
  # Copying 1MB bytes ...
       42.459239 GB/sec
  $

This silences this perf tools build warning:

  Warning: Kernel ABI header at 'tools/arch/x86/lib/memcpy_64.S' differs from latest version at 'arch/x86/lib/memcpy_64.S'

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Mika Penttilä <mika.penttila@nextfour.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Wang Nan <wangnan0@huawei.com>
Link: https://lkml.kernel.org/n/tip-igdpciheradk3gb3qqal52d0@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
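For reference, and not part of the commit itself: after the 60622d6822 change, __memcpy_mcsafe() reports how many bytes were left uncopied instead of an all-or-nothing error code. The sketch below illustrates that caller-visible contract; the software stand-in for the assembly routine and the main() driver are hypothetical, added only so the snippet compiles and runs outside the kernel.

	#include <stdio.h>
	#include <string.h>

	/*
	 * Hypothetical stand-in for the real assembly __memcpy_mcsafe():
	 * the real routine stops at a machine-check/write fault and returns
	 * the number of bytes left uncopied; 0 means the copy completed.
	 */
	static unsigned long __memcpy_mcsafe(void *dst, const void *src, size_t cnt)
	{
		memcpy(dst, src, cnt);	/* no faults simulated in this stand-in */
		return 0;
	}

	int main(void)
	{
		char src[64] = "perf bench mem memcpy";
		char dst[64];
		unsigned long rem = __memcpy_mcsafe(dst, src, sizeof(src));

		if (rem)
			fprintf(stderr, "copy stopped early, %lu bytes remaining\n", rem);
		else
			printf("copied all %zu bytes\n", sizeof(src));
		return rem ? 1 : 0;
	}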
tools/perf/bench/Build

@@ -7,6 +7,7 @@ perf-y += futex-wake-parallel.o
 perf-y += futex-requeue.o
 perf-y += futex-lock-pi.o

+perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-lib.o
 perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-asm.o
 perf-$(CONFIG_X86_64) += mem-memset-x86-64-asm.o
tools/perf/bench/mem-memcpy-x86-64-asm.S

@@ -6,6 +6,7 @@
 #define altinstr_replacement text
 #define globl p2align 4; .globl
+#define _ASM_EXTABLE_FAULT(x, y)
 #define _ASM_EXTABLE(x, y)

 #include "../../arch/x86/lib/memcpy_64.S"
 /*
tools/perf/bench/mem-memcpy-x86-64-lib.c (new file, 24 lines)
@@ -0,0 +1,24 @@
+/*
+ * From code in arch/x86/lib/usercopy_64.c, copied to keep tools/ copy
+ * of the kernel's arch/x86/lib/memcpy_64.s used in 'perf bench mem memcpy'
+ * happy.
+ */
+#include <linux/types.h>
+
+unsigned long __memcpy_mcsafe(void *dst, const void *src, size_t cnt);
+unsigned long mcsafe_handle_tail(char *to, char *from, unsigned len);
+
+unsigned long mcsafe_handle_tail(char *to, char *from, unsigned len)
+{
+	for (; len; --len, to++, from++) {
+		/*
+		 * Call the assembly routine back directly since
+		 * memcpy_mcsafe() may silently fallback to memcpy.
+		 */
+		unsigned long rem = __memcpy_mcsafe(to, from, 1);
+
+		if (rem)
+			break;
+	}
+	return len;
+}
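A usage sketch, not from the commit: mcsafe_handle_tail() retries the copy one byte at a time and returns how many bytes are still uncopied when a fault stops it. The fault-injecting __memcpy_mcsafe() stub, the fault_at/copied variables, and the main() driver below are hypothetical, standing in for the assembly routine so the loop's semantics can be exercised in isolation.

	#include <stdio.h>
	#include <string.h>

	static size_t fault_at = 10;	/* hypothetical: pretend byte 10 is poisoned */
	static size_t copied;

	/* Stub for the assembly routine: "faults" once fault_at bytes were copied. */
	static unsigned long __memcpy_mcsafe(void *dst, const void *src, size_t cnt)
	{
		if (copied >= fault_at)
			return cnt;	/* nothing copied, cnt bytes remain */
		memcpy(dst, src, cnt);
		copied += cnt;
		return 0;
	}

	/* Same shape as the tools/ copy above: byte-by-byte tail handling. */
	static unsigned long mcsafe_handle_tail(char *to, char *from, unsigned len)
	{
		for (; len; --len, to++, from++) {
			unsigned long rem = __memcpy_mcsafe(to, from, 1);

			if (rem)
				break;
		}
		return len;
	}

	int main(void)
	{
		char src[32] = "0123456789abcdefghijklmnopqrstu";
		char dst[32] = { 0 };
		unsigned long rem = mcsafe_handle_tail(dst, src, sizeof(src));

		/* With the stub above, expect 32 - 10 = 22 bytes remaining. */
		printf("%lu bytes remaining after simulated fault\n", rem);
		return 0;
	}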