2019-05-27 06:55:01 +00:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Memory copy functions for 32-bit PowerPC.
 *
 * Copyright (C) 1996-2005 Paul Mackerras.
 */
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
#include <asm/export.h>
#include <asm/code-patching-asm.h>
#include <asm/kasan.h>
2005-09-26 06:04:21 +00:00
|
|
|
|
|
|
|
/*
 * Copy 16 bytes from 4(r4) through the word at 16(r4) to the same
 * offsets from r6, using r7-r10 as scratch.  The trailing "update"
 * forms (lwzu/stwu) advance r4 and r6 by 16 as a side effect, so the
 * macro can be repeated back to back to cover a whole cache line.
 */
#define COPY_16_BYTES		\
	lwz	r7,4(r4);	\
	lwz	r8,8(r4);	\
	lwz	r9,12(r4);	\
	lwzu	r10,16(r4);	\
	stw	r7,4(r6);	\
	stw	r8,8(r6);	\
	stw	r9,12(r6);	\
	stwu	r10,16(r6)
|
/*
 * Same 16-byte copy as COPY_16_BYTES, but every load/store carries a
 * numeric label of the form 8<n><i> (i = 0..7) so that faults on user
 * accesses can be matched by the EX_TABLE entries that
 * COPY_16_BYTES_EXCODE(n) emits for the same n.
 */
#define COPY_16_BYTES_WITHEX(n)	\
8 ## n ## 0:			\
	lwz	r7,4(r4);	\
8 ## n ## 1:			\
	lwz	r8,8(r4);	\
8 ## n ## 2:			\
	lwz	r9,12(r4);	\
8 ## n ## 3:			\
	lwzu	r10,16(r4);	\
8 ## n ## 4:			\
	stw	r7,4(r6);	\
8 ## n ## 5:			\
	stw	r8,8(r6);	\
8 ## n ## 6:			\
	stw	r9,12(r6);	\
8 ## n ## 7:			\
	stwu	r10,16(r6)
|
/*
 * Fault fixup for COPY_16_BYTES_WITHEX(n): labels 9<n>0 / 9<n>1 adjust
 * r5 by the 16*n bytes this macro instance had not yet accounted for,
 * then branch to the common read-fault (104f) or write-fault (105f)
 * handler.  The EX_TABLE entries map the four loads (8<n>0..8<n>3) to
 * the read path and the four stores (8<n>4..8<n>7) to the write path.
 */
#define COPY_16_BYTES_EXCODE(n)			\
9 ## n ## 0:					\
	addi	r5,r5,-(16 * n);		\
	b	104f;				\
9 ## n ## 1:					\
	addi	r5,r5,-(16 * n);		\
	b	105f;				\
	EX_TABLE(8 ## n ## 0b,9 ## n ## 0b);	\
	EX_TABLE(8 ## n ## 1b,9 ## n ## 0b);	\
	EX_TABLE(8 ## n ## 2b,9 ## n ## 0b);	\
	EX_TABLE(8 ## n ## 3b,9 ## n ## 0b);	\
	EX_TABLE(8 ## n ## 4b,9 ## n ## 1b);	\
	EX_TABLE(8 ## n ## 5b,9 ## n ## 1b);	\
	EX_TABLE(8 ## n ## 6b,9 ## n ## 1b);	\
	EX_TABLE(8 ## n ## 7b,9 ## n ## 1b)
|
.text
|
|
|
|
.stabs "arch/powerpc/lib/",N_SO,0,0,0f
|
2010-09-01 07:21:21 +00:00
|
|
|
.stabs "copy_32.S",N_SO,0,0,0f
|
2005-09-26 06:04:21 +00:00
|
|
|
0:
|
|
|
|
|
2005-10-17 01:50:32 +00:00
|
|
|
CACHELINE_BYTES = L1_CACHE_BYTES
|
|
|
|
LG_CACHELINE_BYTES = L1_CACHE_SHIFT
|
|
|
|
CACHELINE_MASK = (L1_CACHE_BYTES-1)
|
2005-09-26 06:04:21 +00:00
|
|
|
|
2019-04-26 16:23:26 +00:00
|
|
|
#ifndef CONFIG_KASAN
/*
 * void *memset16(u16 *s, u16 v, size_t count)
 *
 * In:  r3 = s, r4 = v, r5 = count (in halfwords)
 * Fill the buffer with the 16-bit value in r4.  Words are stored while
 * at least two halfwords remain; a trailing sth handles an odd count.
 * r3 is preserved as the return value.  Clobbers r0, r6, ctr.
 */
_GLOBAL(memset16)
	rlwinm.	r0, r5, 31, 1, 31	/* r0 = count / 2 (number of full words) */
	addi	r6, r3, -4		/* r6 = dest - 4 for the stwu loop */
	beq-	2f			/* fewer than 2 halfwords: skip word loop */
	rlwimi	r4, r4, 16, 0, 15	/* replicate halfword into both halves of r4 */
	mtctr	r0
1:	stwu	r4, 4(r6)
	bdnz	1b
2:	andi.	r0, r5, 1		/* odd halfword left over? */
	beqlr
	sth	r4, 4(r6)		/* store the final halfword */
	blr
EXPORT_SYMBOL(memset16)
#endif
|
2015-05-19 10:07:48 +00:00
|
|
|
/*
 * Use dcbz on the complete cache lines in the destination
 * to set them to zero.  This requires that the destination
 * area is cacheable.  -- paulus
 *
 * During early init, cache might not be active yet, so dcbz cannot be used.
 * We therefore skip the optimised bloc that uses dcbz. This jump is
 * replaced by a nop once cache is active. This is done in machine_init()
 *
 * In:  r3 = dest, r4 = fill byte, r5 = count.  Returns r3 unchanged.
 */
_GLOBAL_KASAN(memset)
	cmplwi	0,r5,4
	blt	7f			/* tiny fill: pure byte loop */

	rlwimi	r4,r4,8,16,23		/* replicate byte into bits 16-23 */
	rlwimi	r4,r4,16,0,15		/* ... and into the upper halfword */

	stw	r4,0(r3)		/* unaligned first word (count >= 4) */
	beqlr				/* exactly 4 bytes: done */
	andi.	r0,r3,3			/* r0 = misalignment of dest */
	add	r5,r0,r5		/* count covers the realigned start */
	subf	r6,r0,r3		/* r6 = dest rounded down to a word */
	cmplwi	0,r4,0			/* dcbz path only valid when filling 0 */
	/*
	 * Skip optimised bloc until cache is enabled. Will be replaced
	 * by 'bne' during boot to use normal procedure if r4 is not zero
	 */
5:	b	2f
	patch_site	5b, patch__memset_nocache

	clrlwi	r7,r6,32-LG_CACHELINE_BYTES	/* offset within cache line */
	add	r8,r7,r5
	srwi	r9,r8,LG_CACHELINE_BYTES
	addic.	r9,r9,-1	/* total number of complete cachelines */
	ble	2f
	xori	r0,r7,CACHELINE_MASK & ~3
	srwi.	r0,r0,2			/* words up to the next line boundary */
	beq	3f
	mtctr	r0
4:	stwu	r4,4(r6)
	bdnz	4b
3:	mtctr	r9
	li	r7,4
10:	dcbz	r7,r6			/* zero a whole line in one shot */
	addi	r6,r6,CACHELINE_BYTES
	bdnz	10b
	clrlwi	r5,r8,32-LG_CACHELINE_BYTES	/* bytes left after last full line */
	addi	r5,r5,4

	/* word-at-a-time tail (also the whole path when dcbz is skipped) */
2:	srwi	r0,r5,2
	mtctr	r0
	bdz	6f
1:	stwu	r4,4(r6)
	bdnz	1b
6:	andi.	r5,r5,3			/* 0-3 trailing bytes */
	beqlr
	mtctr	r5
	addi	r6,r6,3
8:	stbu	r4,1(r6)
	bdnz	8b
	blr

	/* count < 4: simple byte loop from the original dest */
7:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
	addi	r6,r3,-1
9:	stbu	r4,1(r6)
	bdnz	9b
	blr
EXPORT_SYMBOL(memset)
EXPORT_SYMBOL_KASAN(memset)
|
2015-05-19 10:07:48 +00:00
|
|
|
/*
 * This version uses dcbz on the complete cache lines in the
 * destination area to reduce memory traffic.  This requires that
 * the destination area is cacheable.
 * We only use this version if the source and dest don't overlap.
 * -- paulus.
 *
 * During early init, cache might not be active yet, so dcbz cannot be used.
 * We therefore jump to generic_memcpy which doesn't use dcbz. This jump is
 * replaced by a nop once cache is active. This is done in machine_init()
 *
 * In:  r3 = dest, r4 = src, r5 = count.  Returns r3 unchanged.
 */
_GLOBAL_KASAN(memmove)
	cmplw	0,r3,r4
	bgt	backwards_memcpy	/* dest > src: copy high-to-low */
	/* fall through */

_GLOBAL_KASAN(memcpy)
1:	b	generic_memcpy		/* patched to a nop once caches are up */
	patch_site	1b, patch__memcpy_nocache

	add	r7,r3,r5		/* test if the src & dst overlap */
	add	r8,r4,r5
	cmplw	0,r4,r7
	cmplw	1,r3,r8
	crand	0,0,4			/* cr0.lt &= cr1.lt */
	blt	generic_memcpy		/* if regions overlap */

	addi	r4,r4,-4		/* bias pointers for lwzu/stwu */
	addi	r6,r3,-4
	neg	r0,r3
	andi.	r0,r0,CACHELINE_MASK	/* # bytes to start of cache line */
	beq	58f

	cmplw	0,r5,r0			/* is this more than total to do? */
	blt	63f			/* if not much to do */
	andi.	r8,r0,3			/* get it word-aligned first */
	subf	r5,r0,r5
	mtctr	r8
	beq+	61f
70:	lbz	r9,4(r4)		/* do some bytes */
	addi	r4,r4,1
	addi	r6,r6,1
	stb	r9,3(r6)
	bdnz	70b
61:	srwi.	r0,r0,2
	mtctr	r0
	beq	58f
72:	lwzu	r9,4(r4)		/* do some words */
	stwu	r9,4(r6)
	bdnz	72b

58:	srwi.	r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
	clrlwi	r5,r5,32-LG_CACHELINE_BYTES
	li	r11,4
	mtctr	r0
	beq	63f
53:
	dcbz	r11,r6			/* pre-zero dest line: no read-for-write */
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
#endif
#endif
#endif
	bdnz	53b

63:	srwi.	r0,r5,2			/* remaining full words */
	mtctr	r0
	beq	64f
30:	lwzu	r0,4(r4)
	stwu	r0,4(r6)
	bdnz	30b

64:	andi.	r0,r5,3			/* 0-3 trailing bytes */
	mtctr	r0
	beq+	65f
	addi	r4,r4,3
	addi	r6,r6,3
40:	lbzu	r0,1(r4)
	stbu	r0,1(r6)
	bdnz	40b
65:	blr
EXPORT_SYMBOL(memcpy)
EXPORT_SYMBOL(memmove)
EXPORT_SYMBOL_KASAN(memcpy)
EXPORT_SYMBOL_KASAN(memmove)
|
2016-03-16 10:36:06 +00:00
|
|
|
/*
 * Plain forward memcpy with no cache tricks; safe before the caches
 * are enabled and for overlapping regions with dest < src.
 * In:  r3 = dest, r4 = src, r5 = count.  Returns r3 unchanged.
 */
generic_memcpy:
	srwi.	r7,r5,3			/* r7 = count / 8 */
	addi	r6,r3,-4		/* bias pointers for the update forms */
	addi	r4,r4,-4
	beq	2f			/* if less than 8 bytes to do */
	andi.	r0,r6,3			/* get dest word aligned */
	mtctr	r7
	bne	5f
	/* 8 bytes per iteration, dest word-aligned */
1:	lwz	r7,4(r4)
	lwzu	r8,8(r4)
	stw	r7,4(r6)
	stwu	r8,8(r6)
	bdnz	1b
	andi.	r5,r5,7			/* leftover 0-7 bytes */
2:	cmplwi	0,r5,4
	blt	3f
	lwzu	r0,4(r4)		/* one more full word */
	addi	r5,r5,-4
	stwu	r0,4(r6)
3:	cmpwi	0,r5,0
	beqlr
	mtctr	r5			/* 1-3 trailing bytes */
	addi	r4,r4,3
	addi	r6,r6,3
4:	lbzu	r0,1(r4)
	stbu	r0,1(r6)
	bdnz	4b
	blr
	/* copy bytes until dest is word-aligned, then rejoin */
5:	subfic	r0,r0,4
	mtctr	r0
6:	lbz	r7,4(r4)
	addi	r4,r4,1
	stb	r7,4(r6)
	addi	r6,r6,1
	bdnz	6b
	subf	r5,r0,r5
	rlwinm.	r7,r5,32-3,3,31		/* r7 = remaining count / 8 */
	beq	2b
	mtctr	r7
	b	1b
|
/*
 * Copy high-to-low so that overlapping regions with dest > src are
 * handled correctly (used by memmove).
 * In:  r3 = dest, r4 = src, r5 = count.  Returns r3 unchanged.
 */
_GLOBAL(backwards_memcpy)
	rlwinm.	r7,r5,32-3,3,31		/* r7 = count / 8 */
	add	r6,r3,r5		/* start just past the end of each buffer */
	add	r4,r4,r5
	beq	2f
	andi.	r0,r6,3			/* dest end word-aligned? */
	mtctr	r7
	bne	5f
	/* 8 bytes per iteration, descending */
1:	lwz	r7,-4(r4)
	lwzu	r8,-8(r4)
	stw	r7,-4(r6)
	stwu	r8,-8(r6)
	bdnz	1b
	andi.	r5,r5,7			/* leftover 0-7 bytes */
2:	cmplwi	0,r5,4
	blt	3f
	lwzu	r0,-4(r4)		/* one more full word */
	subi	r5,r5,4
	stwu	r0,-4(r6)
3:	cmpwi	0,r5,0
	beqlr
	mtctr	r5			/* 1-3 trailing bytes */
4:	lbzu	r0,-1(r4)
	stbu	r0,-1(r6)
	bdnz	4b
	blr
	/* copy bytes until the dest end is word-aligned, then rejoin */
5:	mtctr	r0
6:	lbzu	r7,-1(r4)
	stbu	r7,-1(r6)
	bdnz	6b
	subf	r5,r0,r5
	rlwinm.	r7,r5,32-3,3,31		/* r7 = remaining count / 8 */
	beq	2b
	mtctr	r7
	b	1b
|
/*
 * unsigned long __copy_tofrom_user(void __user *to, const void __user *from,
 *				    unsigned long size)
 *
 * In:  r3 = to, r4 = from, r5 = size.
 * Out: r3 = number of bytes NOT copied (0 on complete success).
 * Every user-space access is covered by an EX_TABLE entry; the fixup
 * code computes the residual count as r5 + (ctr << r3) and, for read
 * faults, retries byte-by-byte before giving up.
 */
_GLOBAL(__copy_tofrom_user)
	addi	r4,r4,-4		/* bias pointers for lwzu/stwu */
	addi	r6,r3,-4
	neg	r0,r3
	andi.	r0,r0,CACHELINE_MASK	/* # bytes to start of cache line */
	beq	58f

	cmplw	0,r5,r0			/* is this more than total to do? */
	blt	63f			/* if not much to do */
	andi.	r8,r0,3			/* get it word-aligned first */
	mtctr	r8
	beq+	61f
70:	lbz	r9,4(r4)		/* do some bytes */
71:	stb	r9,4(r6)
	addi	r4,r4,1
	addi	r6,r6,1
	bdnz	70b
61:	subf	r5,r0,r5
	srwi.	r0,r0,2
	mtctr	r0
	beq	58f
72:	lwzu	r9,4(r4)		/* do some words */
73:	stwu	r9,4(r6)
	bdnz	72b

	EX_TABLE(70b,100f)
	EX_TABLE(71b,101f)
	EX_TABLE(72b,102f)
	EX_TABLE(73b,103f)

58:	srwi.	r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
	clrlwi	r5,r5,32-LG_CACHELINE_BYTES
	li	r11,4
	beq	63f

	/* Here we decide how far ahead to prefetch the source */
	li	r3,4
	cmpwi	r0,1
	li	r7,0
	ble	114f
	li	r7,1
#if MAX_COPY_PREFETCH > 1
	/* Heuristically, for large transfers we prefetch
	   MAX_COPY_PREFETCH cachelines ahead.  For small transfers
	   we prefetch 1 cacheline ahead. */
	cmpwi	r0,MAX_COPY_PREFETCH
	ble	112f
	li	r7,MAX_COPY_PREFETCH
112:	mtctr	r7
111:	dcbt	r3,r4
	addi	r3,r3,CACHELINE_BYTES
	bdnz	111b
#else
	dcbt	r3,r4
	addi	r3,r3,CACHELINE_BYTES
#endif /* MAX_COPY_PREFETCH > 1 */

114:	subf	r8,r7,r0		/* lines to copy while still prefetching */
	mr	r0,r7			/* r0 = lines left once prefetch runs out */
	mtctr	r8

53:	dcbt	r3,r4			/* prefetch source ahead */
54:	dcbz	r11,r6			/* pre-zero dest line: no read-for-write */
	EX_TABLE(54b,105f)
/* the main body of the cacheline loop */
	COPY_16_BYTES_WITHEX(0)
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES_WITHEX(1)
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES_WITHEX(2)
	COPY_16_BYTES_WITHEX(3)
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES_WITHEX(4)
	COPY_16_BYTES_WITHEX(5)
	COPY_16_BYTES_WITHEX(6)
	COPY_16_BYTES_WITHEX(7)
#endif
#endif
#endif
	bdnz	53b
	cmpwi	r0,0			/* tail lines without prefetch? */
	li	r3,4
	li	r7,0
	bne	114b

63:	srwi.	r0,r5,2			/* remaining full words */
	mtctr	r0
	beq	64f
30:	lwzu	r0,4(r4)
31:	stwu	r0,4(r6)
	bdnz	30b

64:	andi.	r0,r5,3			/* 0-3 trailing bytes */
	mtctr	r0
	beq+	65f
40:	lbz	r0,4(r4)
41:	stb	r0,4(r6)
	addi	r4,r4,1
	addi	r6,r6,1
	bdnz	40b
65:	li	r3,0			/* success: 0 bytes left uncopied */
	blr

	/* read fault, initial single-byte copy */
100:	li	r9,0
	b	90f
	/* write fault, initial single-byte copy */
101:	li	r9,1
90:	subf	r5,r8,r5
	li	r3,0
	b	99f
	/* read fault, initial word copy */
102:	li	r9,0
	b	91f
	/* write fault, initial word copy */
103:	li	r9,1
91:	li	r3,2
	b	99f

/*
 * this stuff handles faults in the cacheline loop and branches to either
 * 104f (if in read part) or 105f (if in write part), after updating r5
 */
	COPY_16_BYTES_EXCODE(0)
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES_EXCODE(1)
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES_EXCODE(2)
	COPY_16_BYTES_EXCODE(3)
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES_EXCODE(4)
	COPY_16_BYTES_EXCODE(5)
	COPY_16_BYTES_EXCODE(6)
	COPY_16_BYTES_EXCODE(7)
#endif
#endif
#endif

	/* read fault in cacheline loop */
104:	li	r9,0
	b	92f
	/* fault on dcbz (effectively a write fault) */
	/* or write fault in cacheline loop */
105:	li	r9,1
92:	li	r3,LG_CACHELINE_BYTES
	mfctr	r8
	add	r0,r0,r8		/* fold unstarted lines back into the count */
	b	106f
	/* read fault in final word loop */
108:	li	r9,0
	b	93f
	/* write fault in final word loop */
109:	li	r9,1
93:	andi.	r5,r5,3
	li	r3,2
	b	99f
	/* read fault in final byte loop */
110:	li	r9,0
	b	94f
	/* write fault in final byte loop */
111:	li	r9,1
94:	li	r5,0
	li	r3,0
/*
 * At this stage the number of bytes not copied is
 * r5 + (ctr << r3), and r9 is 0 for read or 1 for write.
 */
99:	mfctr	r0
106:	slw	r3,r0,r3
	add.	r3,r3,r5		/* r3 = bytes not yet copied */
	beq	120f			/* shouldn't happen */
	cmpwi	0,r9,0
	bne	120f
	/* for a read fault, first try to continue the copy one byte at a time */
	mtctr	r3
130:	lbz	r0,4(r4)
131:	stb	r0,4(r6)
	addi	r4,r4,1
	addi	r6,r6,1
	bdnz	130b
	/* then clear out the destination: r3 bytes starting at 4(r6) */
132:	mfctr	r3
120:	blr

	EX_TABLE(30b,108b)
	EX_TABLE(31b,109b)
	EX_TABLE(40b,110b)
	EX_TABLE(41b,111b)
	EX_TABLE(130b,132b)
	EX_TABLE(131b,120b)

EXPORT_SYMBOL(__copy_tofrom_user)