#ifndef _ASM_X86_XOR_64_H
#define _ASM_X86_XOR_64_H
|
2008-06-10 21:45:45 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
 * Candidate XOR implementation backed by the unrolled SSE routines.
 * .do_2 .. .do_5 provide the 2- to 5-source XOR operations;
 * xor_sse_2..xor_sse_5 and struct xor_block_template are declared in
 * headers included elsewhere -- this table only registers them under
 * the name "generic_sse" for the template-selection machinery below.
 */
static struct xor_block_template xor_block_sse = {
	.name = "generic_sse",
	.do_2 = xor_sse_2,
	.do_3 = xor_sse_3,
	.do_4 = xor_sse_4,
	.do_5 = xor_sse_5,
};
/* Also try the AVX routines */
#include <asm/xor_avx.h>
/* We force the use of the SSE xor block because it can write around L2.
   We may also be able to load into the L1 only depending on how the cpu
   deals with a load to a line that is being prefetched. */
#undef XOR_TRY_TEMPLATES	/* drop any definition from previously included xor headers */
/*
 * Probe each candidate XOR implementation via xor_speed() (presumably a
 * benchmark/registration hook -- defined elsewhere) so the best one can
 * be selected: first the AVX candidates (AVX_XOR_SPEED, from
 * <asm/xor_avx.h>), then the prefetch-64 SSE variant (xor_block_sse_pf64,
 * declared elsewhere), then the generic SSE table defined above.
 * Multi-statement macro wrapped in do { } while (0) so it behaves as a
 * single statement at the call site.
 */
#define XOR_TRY_TEMPLATES			\
do {						\
	AVX_XOR_SPEED;				\
	xor_speed(&xor_block_sse_pf64);		\
	xor_speed(&xor_block_sse);		\
} while (0)
#endif /* _ASM_X86_XOR_64_H */