forked from Minki/linux
fdf4289679
memmove() allows the source and destination addresses to overlap, but there is no such allowance for memcpy(). Therefore, explicitly implement memmove() in both the forward and backward directions, to give us the ability to optimize memcpy(). Signed-off-by: Ma Ling <ling.ma@intel.com> LKML-Reference: <C10D3FB0CD45994C8A51FEC1227CE22F0E483AD86A@shsmsx502.ccr.corp.intel.com> Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
60 lines
998 B
C
60 lines
998 B
C
#include <linux/string.h>
|
|
#include <linux/module.h>
|
|
|
|
#undef memcpy
|
|
#undef memset
|
|
|
|
/*
 * memcpy - copy @n bytes from @from to @to (areas must not overlap).
 *
 * Returns @to, matching the standard memcpy() contract.
 *
 * Dispatches at build time to one of the arch-internal helpers:
 * __memcpy3d() when CONFIG_X86_USE_3DNOW is set (presumably the
 * 3DNow!-optimized variant — the helpers are defined elsewhere),
 * otherwise the generic __memcpy().
 */
void *memcpy(void *to, const void *from, size_t n)
{
#ifdef CONFIG_X86_USE_3DNOW
	return __memcpy3d(to, from, n);
#else
	return __memcpy(to, from, n);
#endif
}
/* Exported so modules resolve memcpy to this kernel implementation. */
EXPORT_SYMBOL(memcpy);
|
|
|
|
/*
 * memset - fill @count bytes at @s with the byte value @c.
 *
 * Returns @s. Thin wrapper that forwards directly to the
 * arch-internal __memset() (defined elsewhere); #undef memset above
 * ensures this defines the real function, not a macro expansion.
 */
void *memset(void *s, int c, size_t count)
{
	return __memset(s, c, count);
}
/* Exported so modules resolve memset to this kernel implementation. */
EXPORT_SYMBOL(memset);
|
|
|
|
/*
 * memmove - copy @n bytes from @src to @dest, allowing the two
 * areas to overlap.
 *
 * Returns @dest.
 *
 * Strategy: when the regions are fully disjoint, defer to memcpy().
 * Otherwise copy byte-by-byte with "rep movsb" in the direction that
 * is safe for the given overlap:
 *   - dest below src: copy forwards (ascending addresses);
 *   - dest above src: copy backwards (std sets the direction flag so
 *     rep movsb descends; cld restores the default before returning).
 *
 * d0/d1/d2 only receive the clobbered ECX/ESI/EDI register values;
 * they are otherwise unused.
 */
void *memmove(void *dest, const void *src, size_t n)
{
	int d0, d1, d2;

	if (dest < src) {
		if ((dest + n) < src)
			/* No overlap at all: plain memcpy is fine. */
			return memcpy(dest, src, n);
		else
			/* Forward copy is safe when dest precedes src. */
			__asm__ __volatile__(
				"rep\n\t"
				"movsb\n\t"
				: "=&c" (d0), "=&S" (d1), "=&D" (d2)
				: "0" (n),
				  "1" (src),
				  "2" (dest)
				: "memory");
	} else {
		/*
		 * BUGFIX: this branch previously referenced an
		 * undeclared variable `count`; the byte count
		 * parameter is `n`.
		 */
		if ((src + n) < dest)
			/* No overlap at all: plain memcpy is fine. */
			return memcpy(dest, src, n);
		else
			/*
			 * Backward copy, starting from the last byte of
			 * each region, is safe when dest follows src.
			 */
			__asm__ __volatile__(
				"std\n\t"
				"rep\n\t"
				"movsb\n\t"
				"cld"
				: "=&c" (d0), "=&S" (d1), "=&D" (d2)
				: "0" (n),
				  "1" (n - 1 + src),
				  "2" (n - 1 + dest)
				: "memory");
	}

	return dest;
}
/* Exported so modules resolve memmove to this kernel implementation. */
EXPORT_SYMBOL(memmove);
|