commit 8478132a87

This reverts commit 4dd1837d75.
Moving the exports for assembly code into the assembly files breaks
KSYM trimming, and it also breaks modversions.

While fixing the KSYM trimming is trivial, fixing modversions brings
us to a technically worse position than we had prior to the above
change:
- We end up with the prototype definitions divorced from everything
  else, which means that adding or removing an assembly-level ksym
  export becomes more fragile (the two styles are sketched after
  this list):
  * when adding a new assembly ksym export, a missed prototype in
    asm-prototypes.h still results in a successful build if no module
    in the selected configuration makes use of the symbol.
  * when removing a ksym export, asm-prototypes.h is easily
    forgotten; with armksyms.c, you get a build error if you forget
    to touch the file.
- We end up with the same number of include files and prototypes;
  they're just in a header file instead of in a .c file alongside
  their exports.
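
For context, a rough sketch of the two styles being compared;
copy_page is used purely as an example symbol, and the exact include
set may differ per architecture:

  /* Old style: export collected in arch/arm/kernel/armksyms.c,
   * with the prototype coming from a normal C header.  */
  #include <linux/export.h>
  #include <asm/page.h>	/* extern void copy_page(void *, const void *); */

  EXPORT_SYMBOL(copy_page);

  /* New style (the reverted approach): export placed in the .S
   * file next to the definition...  */
  #include <asm/export.h>
	/* ...code... */
  ENDPROC(copy_page)
  EXPORT_SYMBOL(copy_page)

  /* ...plus, for modversions, a duplicate prototype in
   * asm/asm-prototypes.h so that genksyms can compute a CRC
   * for the symbol.  */
  extern void copy_page(void *to, const void *from);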
As for lines of code, we don't get much of a size reduction:
  (original commit)
    47 files changed, 131 insertions(+), 208 deletions(-)
  (fix for ksyms trimming)
    7 files changed, 18 insertions(+), 5 deletions(-)
  (two fixes for modversions)
    1 file changed, 34 insertions(+)
    3 files changed, 7 insertions(+), 2 deletions(-)
which results in a net total of only 25 lines deleted: the original
commit removed a net 77 lines, and the fixes added back a net 52.
As there does not seem to be much benefit from this change of approach,
revert the change.
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
/*
 *  linux/arch/arm/lib/copypage.S
 *
 *  Copyright (C) 1995-1999 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  ASM optimised string functions
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>

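/*
 * Iterations of the main copy loop: each pass moves two cache lines.
 * With PLD enabled, one iteration is peeled off (the "-1") so the
 * final two cache lines are copied via the branch back to 2b below,
 * without prefetching beyond the end of the source page.
 */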
#define COPY_COUNT (PAGE_SZ / (2 * L1_CACHE_BYTES) PLD( -1 ))

		.text
		.align	5
/*
 * StrongARM optimised copy_page routine
 * now 1.78 bytes/cycle, was 1.60 bytes/cycle (50MHz bus -> 89MB/s)
 * Note that we probably achieve closer to the 100MB/s target with
 * the core clock switching.
 */
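/*
 * void copy_page(void *to, const void *from)
 * r0 = destination page ('to'), r1 = source page ('from')
 */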
ENTRY(copy_page)
		stmfd	sp!, {r4, lr}			@	2
	PLD(	pld	[r1, #0]		)
	PLD(	pld	[r1, #L1_CACHE_BYTES]	)
		mov	r2, #COPY_COUNT			@	1
		ldmia	r1!, {r3, r4, ip, lr}		@	4+1
1:	PLD(	pld	[r1, #2 * L1_CACHE_BYTES])
	PLD(	pld	[r1, #3 * L1_CACHE_BYTES])
2:
	.rept	(2 * L1_CACHE_BYTES / 16 - 1)
		stmia	r0!, {r3, r4, ip, lr}		@	4
		ldmia	r1!, {r3, r4, ip, lr}		@	4
	.endr
		subs	r2, r2, #1			@	1
		stmia	r0!, {r3, r4, ip, lr}		@	4
		ldmgtia	r1!, {r3, r4, ip, lr}		@	4
		bgt	1b				@	1
	PLD(	ldmeqia	r1!, {r3, r4, ip, lr}	)
	PLD(	beq	2b			)
		ldmfd	sp!, {r4, pc}			@	3
ENDPROC(copy_page)
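
For readers less familiar with ARM assembly, a hedged C model of the
loop structure above. It assumes the StrongARM values PAGE_SZ = 4096
and L1_CACHE_BYTES = 32, folds the PLD(-1) epilogue back into the
main loop, and copy_page_model is an illustrative name rather than a
kernel symbol:

	#include <stdint.h>

	#define PAGE_SZ		4096	/* assumed 4 KiB pages */
	#define L1_CACHE_BYTES	32	/* assumed StrongARM line size */

	/*
	 * Each outer pass mirrors one trip through the loop at 1:/2:
	 * above, moving two cache lines (64 bytes) as four 16-byte
	 * ldm/stm pairs (registers r3, r4, ip, lr in the assembly).
	 */
	void copy_page_model(void *to, const void *from)
	{
		uint32_t *dst = to;
		const uint32_t *src = from;
		int count = PAGE_SZ / (2 * L1_CACHE_BYTES); /* 64 passes */

		while (count--) {
			/* four load/store pairs per pass */
			for (int i = 0; i < (2 * L1_CACHE_BYTES) / 16; i++) {
				dst[0] = src[0];
				dst[1] = src[1];
				dst[2] = src[2];
				dst[3] = src[3];
				dst += 4;
				src += 4;
			}
		}
	}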