s390/raid6: convert to use standard fpu_*() inline assemblies
Move the s390 specific raid6 inline assemblies, make them generic, and reuse
them to implement the raid6 gen/xor implementation.

Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
commit c8dde11df1
parent dcd3e1de9d
@@ -108,6 +108,14 @@ static __always_inline void fpu_stfpc(unsigned int *fpc)
 		     : "memory");
 }
 
+static __always_inline void fpu_vab(u8 v1, u8 v2, u8 v3)
+{
+	asm volatile("VAB	%[v1],%[v2],%[v3]"
+		     :
+		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
+		     : "memory");
+}
+
 static __always_inline void fpu_vcksm(u8 v1, u8 v2, u8 v3)
 {
 	asm volatile("VCKSM	%[v1],%[v2],%[v3]"
@@ -116,6 +124,14 @@ static __always_inline void fpu_vcksm(u8 v1, u8 v2, u8 v3)
 		     : "memory");
 }
 
+static __always_inline void fpu_vesravb(u8 v1, u8 v2, u8 v3)
+{
+	asm volatile("VESRAVB	%[v1],%[v2],%[v3]"
+		     :
+		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
+		     : "memory");
+}
+
 #ifdef CONFIG_CC_IS_CLANG
 
 static __always_inline void fpu_vl(u8 v1, const void *vxr)
@@ -231,6 +247,14 @@ static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
 
 #endif /* CONFIG_CC_IS_CLANG */
 
+static __always_inline void fpu_vlr(u8 v1, u8 v2)
+{
+	asm volatile("VLR	%[v1],%[v2]"
+		     :
+		     : [v1] "I" (v1), [v2] "I" (v2)
+		     : "memory");
+}
+
 static __always_inline void fpu_vlvgf(u8 v, u32 val, u16 index)
 {
 	asm volatile("VLVGF	%[v],%[val],%[index]"
@@ -239,6 +263,22 @@ static __always_inline void fpu_vlvgf(u8 v, u32 val, u16 index)
 		     : "memory");
 }
 
+static __always_inline void fpu_vn(u8 v1, u8 v2, u8 v3)
+{
+	asm volatile("VN	%[v1],%[v2],%[v3]"
+		     :
+		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
+		     : "memory");
+}
+
+static __always_inline void fpu_vrepib(u8 v1, s16 i2)
+{
+	asm volatile("VREPIB	%[v1],%[i2]"
+		     :
+		     : [v1] "I" (v1), [i2] "K" (i2)
+		     : "memory");
+}
+
 #ifdef CONFIG_CC_IS_CLANG
 
 static __always_inline void fpu_vst(u8 v1, const void *vxr)
@@ -335,6 +375,14 @@ static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
 
 #endif /* CONFIG_CC_IS_CLANG */
 
+static __always_inline void fpu_vx(u8 v1, u8 v2, u8 v3)
+{
+	asm volatile("VX	%[v1],%[v2],%[v3]"
+		     :
+		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
+		     : "memory");
+}
+
 static __always_inline void fpu_vzero(u8 v)
 {
 	asm volatile("VZERO	%[v]"
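For illustration only, not part of the commit: the helpers added above are ordinary inline functions, so vector operations can be written as plain C, provided the register numbers are compile-time constants (they are emitted as "I" immediates). The helper name and register choices below are hypothetical; fpu_vl() and fpu_vst() are the existing load/store helpers in the same header, and the caller is assumed to already own the vector registers (i.e. to run inside a kernel FPU section).

/* Hypothetical sketch, not from the patch: XOR one 16-byte block. */
static inline void xor_block16(u8 *dst, const u8 *src)
{
	fpu_vl(16, dst);	/* V16 = *dst */
	fpu_vl(17, src);	/* V17 = *src */
	fpu_vx(16, 16, 17);	/* V16 ^= V17 */
	fpu_vst(16, dst);	/* *dst = V16 */
}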
@@ -16,10 +16,10 @@
 
 #define NSIZE 16
 
-static inline void LOAD_CONST(void)
+static __always_inline void LOAD_CONST(void)
 {
-	asm volatile("VREPIB %v24,7");
-	asm volatile("VREPIB %v25,0x1d");
+	fpu_vrepib(24, 0x07);
+	fpu_vrepib(25, 0x1d);
 }
 
 /*
@@ -27,10 +27,7 @@ static inline void LOAD_CONST(void)
  * vector register y left by 1 bit and stores the result in
  * vector register x.
  */
-static inline void SHLBYTE(int x, int y)
-{
-	asm volatile ("VAB %0,%1,%1" : : "i" (x), "i" (y));
-}
+#define SHLBYTE(x, y)	fpu_vab(x, y, y)
 
 /*
  * For each of the 16 bytes in the vector register y the MASK()
@@ -38,45 +35,13 @@ static inline void SHLBYTE(int x, int y)
  * or 0x00 if the high bit is 0. The result is stored in vector
  * register x.
  */
-static inline void MASK(int x, int y)
-{
-	asm volatile ("VESRAVB %0,%1,24" : : "i" (x), "i" (y));
-}
-
-static inline void AND(int x, int y, int z)
-{
-	asm volatile ("VN %0,%1,%2" : : "i" (x), "i" (y), "i" (z));
-}
-
-static inline void XOR(int x, int y, int z)
-{
-	asm volatile ("VX %0,%1,%2" : : "i" (x), "i" (y), "i" (z));
-}
-
-static inline void LOAD_DATA(int x, u8 *ptr)
-{
-	typedef struct { u8 _[16 * $#]; } addrtype;
-	register addrtype *__ptr asm("1") = (addrtype *) ptr;
-
-	asm volatile ("VLM %2,%3,0,%1"
-		      : : "m" (*__ptr), "a" (__ptr), "i" (x),
-			  "i" (x + $# - 1));
-}
-
-static inline void STORE_DATA(int x, u8 *ptr)
-{
-	typedef struct { u8 _[16 * $#]; } addrtype;
-	register addrtype *__ptr asm("1") = (addrtype *) ptr;
-
-	asm volatile ("VSTM %2,%3,0,1"
-		      : "=m" (*__ptr) : "a" (__ptr), "i" (x),
-			  "i" (x + $# - 1));
-}
-
-static inline void COPY_VEC(int x, int y)
-{
-	asm volatile ("VLR %0,%1" : : "i" (x), "i" (y));
-}
+#define MASK(x, y)	fpu_vesravb(x, y, 24)
+#define AND(x, y, z)	fpu_vn(x, y, z)
+#define XOR(x, y, z)	fpu_vx(x, y, z)
+#define LOAD_DATA(x, ptr)	fpu_vlm(x, x + $# - 1, ptr)
+#define STORE_DATA(x, ptr)	fpu_vstm(x, x + $# - 1, ptr)
+#define COPY_VEC(x, y)	fpu_vlr(x, y)
 
 static void raid6_s390vx$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
 {
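A note on the SHLBYTE()/MASK() comments in the hunk above: together with the 0x07 and 0x1d constants that LOAD_CONST() places in V24 and V25, these macros implement the RAID-6 multiply-by-2 in GF(2^8), sixteen bytes at a time. A scalar per-byte sketch, for illustration only and not part of the patch, assuming the standard RAID-6 polynomial 0x1d (the value loaded into V25):

/* Per-byte equivalent of one SHLBYTE/MASK/AND/XOR round. */
static inline u8 gf256_mul2(u8 w)
{
	u8 mask = (w & 0x80) ? 0xff : 0x00;	/* MASK(): arithmetic shift right by 7 (V24) */
	u8 shl  = (u8)(w << 1);			/* SHLBYTE(): VAB w,w,w == w + w per byte */

	return shl ^ (mask & 0x1d);		/* AND() with V25, then XOR() */
}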