crypto: crypto_xor - use helpers for unaligned accesses
Dereferencing a misaligned pointer is undefined behavior in C, and on architectures such as ARM it may result in generated code that triggers alignment traps and expensive fixups in software.

Instead, use the get_unaligned()/put_unaligned() accessors, which are cheap or even completely free when CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y.

In the converse case, the prior alignment checks ensure that the casts are safe, and so no unaligned accessors are necessary.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 4920a4a726
commit 7976c14925
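For readers unfamiliar with the helpers: get_unaligned()/put_unaligned() tell the compiler that the address may be misaligned, so on strict-alignment targets it emits trap-free (e.g. byte-wise) accesses, while on targets with CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y they collapse to a plain load or store. A rough userspace sketch of the same idea, using memcpy() as the portable stand-in (the names below are illustrative, not the kernel implementation):

#include <stdint.h>
#include <string.h>

/*
 * Illustrative stand-ins for get_unaligned()/put_unaligned(): a fixed-size
 * memcpy() sidesteps the undefined behaviour of dereferencing a misaligned
 * pointer, and compilers fold it into a single load/store on targets where
 * misaligned accesses are cheap.
 */
static inline uint32_t load_unaligned_u32(const void *p)
{
        uint32_t v;

        memcpy(&v, p, sizeof(v));
        return v;
}

static inline void store_unaligned_u32(void *p, uint32_t v)
{
        memcpy(p, &v, sizeof(v));
}

With these, load_unaligned_u32(buf + 1) is well-defined even though buf + 1 is not 4-byte aligned, which is exactly the property the patch relies on in the CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y paths below.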
crypto/algapi.c
@@ -1022,7 +1022,13 @@ void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len)
 	}
 
 	while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) {
-		*(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2;
+		if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
+			u64 l = get_unaligned((u64 *)src1) ^
+				get_unaligned((u64 *)src2);
+			put_unaligned(l, (u64 *)dst);
+		} else {
+			*(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2;
+		}
 		dst += 8;
 		src1 += 8;
 		src2 += 8;
@@ -1030,7 +1036,13 @@ void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len)
 	}
 
 	while (len >= 4 && !(relalign & 3)) {
-		*(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2;
+		if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
+			u32 l = get_unaligned((u32 *)src1) ^
+				get_unaligned((u32 *)src2);
+			put_unaligned(l, (u32 *)dst);
+		} else {
+			*(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2;
+		}
 		dst += 4;
 		src1 += 4;
 		src2 += 4;
@@ -1038,7 +1050,13 @@ void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len)
 	}
 
 	while (len >= 2 && !(relalign & 1)) {
-		*(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2;
+		if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
+			u16 l = get_unaligned((u16 *)src1) ^
+				get_unaligned((u16 *)src2);
+			put_unaligned(l, (u16 *)dst);
+		} else {
+			*(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2;
+		}
 		dst += 2;
 		src1 += 2;
 		src2 += 2;
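Taken together, the three hunks above give every stride of __crypto_xor the same shape. A simplified, self-contained userspace sketch of the 8-byte case (the function name and the byte-wise tail are illustrative; memcpy() stands in for the kernel's get_unaligned()/put_unaligned()):

#include <stdint.h>
#include <string.h>

static void xor_block(uint8_t *dst, const uint8_t *src1, const uint8_t *src2,
                      unsigned int len)
{
        /* 8 bytes at a time, independent of the buffers' alignment. */
        while (len >= 8) {
                uint64_t a, b, r;

                memcpy(&a, src1, sizeof(a));    /* get_unaligned() stand-in */
                memcpy(&b, src2, sizeof(b));
                r = a ^ b;
                memcpy(dst, &r, sizeof(r));     /* put_unaligned() stand-in */

                dst += 8;
                src1 += 8;
                src2 += 8;
                len -= 8;
        }

        /* Whatever is left, one byte at a time. */
        while (len--)
                *dst++ = *src1++ ^ *src2++;
}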
include/crypto/algapi.h
@@ -13,6 +13,8 @@
 #include <linux/list.h>
 #include <linux/types.h>
 
+#include <asm/unaligned.h>
+
 /*
  * Maximum values for blocksize and alignmask, used to allocate
  * static buffers that are big enough for any combination of
@@ -154,9 +156,11 @@ static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
 	    (size % sizeof(unsigned long)) == 0) {
 		unsigned long *d = (unsigned long *)dst;
 		unsigned long *s = (unsigned long *)src;
+		unsigned long l;
 
 		while (size > 0) {
-			*d++ ^= *s++;
+			l = get_unaligned(d) ^ get_unaligned(s++);
+			put_unaligned(l, d++);
 			size -= sizeof(unsigned long);
 		}
 	} else {
@@ -173,9 +177,11 @@ static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
 		unsigned long *d = (unsigned long *)dst;
 		unsigned long *s1 = (unsigned long *)src1;
 		unsigned long *s2 = (unsigned long *)src2;
+		unsigned long l;
 
 		while (size > 0) {
-			*d++ = *s1++ ^ *s2++;
+			l = get_unaligned(s1++) ^ get_unaligned(s2++);
+			put_unaligned(l, d++);
 			size -= sizeof(unsigned long);
 		}
 	} else {