mirror of
https://github.com/torvalds/linux.git
synced 2024-12-24 20:01:55 +00:00
0bdf525f04
This patch is meant to improve overall system performance when making use of the __phys_addr call. To do this I have implemented several changes. First if CONFIG_DEBUG_VIRTUAL is not defined __phys_addr is made an inline, similar to how this is currently handled in 32 bit. However in order to do this it is required to export phys_base so that it is available if __phys_addr is used in kernel modules. The second change was to streamline the code by making use of the carry flag on an add operation instead of performing a compare on a 64 bit value. The advantage to this is that it allows us to significantly reduce the overall size of the call. On my Xeon E5 system the entire __phys_addr inline call consumes a little less than 32 bytes and 5 instructions. I also applied similar logic to the debug version of the function. My testing shows that the debug version of the function with this patch applied is slightly faster than the non-debug version without the patch. Finally I also applied the same logic changes to __virt_addr_valid since it used the same general code flow as __phys_addr and could achieve similar gains through these changes. Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com> Link: http://lkml.kernel.org/r/20121116215315.8521.46270.stgit@ahduyck-cp1.jf.intel.com Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
69 lines
1.6 KiB
C
69 lines
1.6 KiB
C
/* Exports for assembly files.
   All C exports should go in the respective C files. */

#include <linux/module.h>
#include <linux/smp.h>

#include <net/checksum.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/desc.h>
#include <asm/ftrace.h>

#ifdef CONFIG_FUNCTION_TRACER
/* mcount and __fentry__ are defined in assembly */
#ifdef CC_USING_FENTRY
EXPORT_SYMBOL(__fentry__);
#else
EXPORT_SYMBOL(mcount);
#endif
#endif

/* Fixed-size user-access helpers (implemented in assembly). */
EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
EXPORT_SYMBOL(__get_user_4);
EXPORT_SYMBOL(__get_user_8);
EXPORT_SYMBOL(__put_user_1);
EXPORT_SYMBOL(__put_user_2);
EXPORT_SYMBOL(__put_user_4);
EXPORT_SYMBOL(__put_user_8);

/* Bulk user-copy variants selected at runtime by CPU feature. */
EXPORT_SYMBOL(copy_user_generic_string);
EXPORT_SYMBOL(copy_user_generic_unrolled);
EXPORT_SYMBOL(copy_user_enhanced_fast_string);
EXPORT_SYMBOL(__copy_user_nocache);
EXPORT_SYMBOL(_copy_from_user);
EXPORT_SYMBOL(_copy_to_user);

EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(clear_page);

EXPORT_SYMBOL(csum_partial);

/*
 * Export string functions. We normally rely on gcc builtin for most of these,
 * but gcc sometimes decides not to inline them.
 */
#undef memcpy
#undef memset
#undef memmove

extern void *memset(void *, int, __kernel_size_t);
extern void *memcpy(void *, const void *, __kernel_size_t);
extern void *__memcpy(void *, const void *, __kernel_size_t);

EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(memmove);

/*
 * Without CONFIG_DEBUG_VIRTUAL, __phys_addr() is a static inline that
 * reads phys_base directly, so modules need the symbol exported.
 * (With CONFIG_DEBUG_VIRTUAL the out-of-line __phys_addr() is exported
 * from its own C file instead.)
 */
#ifndef CONFIG_DEBUG_VIRTUAL
EXPORT_SYMBOL(phys_base);
#endif
EXPORT_SYMBOL(empty_zero_page);
#ifndef CONFIG_PARAVIRT
/* Under paravirt, load_gs_index is routed through pv_ops instead. */
EXPORT_SYMBOL(native_load_gs_index);
#endif