xtensa: clean up functions in assembly code

Use ENTRY and ENDPROC throughout arch/xtensa/lib assembly sources.
Introduce asm/linkage.h and define xtensa-specific __ALIGN macro there.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Author: Max Filippov <jcmvbkbc@gmail.com>
Date:   2017-12-09 21:22:37 -08:00
Commit: 5cf97ebd8b (parent: fbb871e220)

6 changed files with 36 additions and 36 deletions

arch/xtensa/include/asm/linkage.h (new file)

@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#define __ALIGN		.align 4
+#define __ALIGN_STR	".align 4"
+
+#endif
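For context on why the override is needed: the generic ENTRY()/ENDPROC() macros in include/linux/linkage.h emit an ALIGN directive between .globl and the label, and ALIGN falls back to a generic __ALIGN default that is not suitable for xtensa, hence the arch-specific ".align 4" above. The sketch below is not part of the patch; it only illustrates, in simplified form, what ENTRY(memcpy)/ENDPROC(memcpy) roughly assemble to under those generic definitions:

	/* Illustrative expansion only (simplified, assuming the generic
	 * ENTRY/ENDPROC definitions in include/linux/linkage.h; not the
	 * literal assembler output):
	 */
		.globl	memcpy
		.align	4			# ALIGN -> __ALIGN from asm/linkage.h
	memcpy:
		entry	sp, 16			# function body is unchanged
		# ...
		retw
		.type	memcpy, @function	# from ENDPROC(memcpy)
		.size	memcpy, . - memcpy	# from END(memcpy), emitted by ENDPROC

Functionally this matches the hand-written .align/.global/.type sequences being removed below, with the addition of a .size record for each function.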

arch/xtensa/lib/memcopy.S

@@ -9,6 +9,7 @@
  * Copyright (C) 2002 - 2012 Tensilica Inc.
  */

+#include <linux/linkage.h>
 #include <variant/core.h>
 #include <asm/asmmacro.h>
@@ -108,10 +109,7 @@
 	addi	a5, a5, 2
 	j	.Ldstaligned	# dst is now aligned, return to main algorithm

-	.align	4
-	.global	memcpy
-	.type	memcpy,@function
-memcpy:
+ENTRY(memcpy)

 	entry	sp, 16		# minimal stack frame
 	# a2/ dst, a3/ src, a4/ len
@@ -273,14 +271,14 @@ memcpy:
 	s8i	a6, a5, 0
 	retw

+ENDPROC(memcpy)
+
 /*
  * void bcopy(const void *src, void *dest, size_t n);
  */
-	.align	4
-	.global	bcopy
-	.type	bcopy,@function
-bcopy:
+
+ENTRY(bcopy)
+
 	entry	sp, 16		# minimal stack frame
 	# a2=src, a3=dst, a4=len
 	mov	a5, a3
@@ -288,6 +286,8 @@ bcopy:
 	mov	a2, a5
 	j	.Lmovecommon	# go to common code for memmove+bcopy

+ENDPROC(bcopy)
+
 /*
  * void *memmove(void *dst, const void *src, size_t len);
  *
@@ -376,10 +376,7 @@ bcopy:
 	j	.Lbackdstaligned	# dst is now aligned,
 					# return to main algorithm

-	.align	4
-	.global	memmove
-	.type	memmove,@function
-memmove:
+ENTRY(memmove)

 	entry	sp, 16		# minimal stack frame
 	# a2/ dst, a3/ src, a4/ len
@@ -551,11 +548,4 @@ memmove:
 	s8i	a6, a5, 0
 	retw

-
-/*
- * Local Variables:
- * mode:fundamental
- * comment-start: "# "
- * comment-start-skip: "# *"
- * End:
- */
+ENDPROC(memmove)

arch/xtensa/lib/memset.S

@@ -11,6 +11,7 @@
  * Copyright (C) 2002 Tensilica Inc.
  */

+#include <linux/linkage.h>
 #include <variant/core.h>
 #include <asm/asmmacro.h>
@@ -30,10 +31,8 @@
  */

 	.text
-	.align	4
-	.global	memset
-	.type	memset,@function
-memset:
+ENTRY(memset)
+
 	entry	sp, 16		# minimal stack frame
 	# a2/ dst, a3/ c, a4/ length
 	extui	a3, a3, 0, 8	# mask to just 8 bits
@@ -141,6 +140,7 @@ EX(10f)	s8i	a3, a5, 0
 .Lbytesetdone:
 	retw

+ENDPROC(memset)

 	.section .fixup, "ax"
 	.align	4

arch/xtensa/lib/strncpy_user.S

@@ -12,6 +12,7 @@
  */

 #include <linux/errno.h>
+#include <linux/linkage.h>
 #include <variant/core.h>
 #include <asm/asmmacro.h>
@@ -47,10 +48,8 @@
 	# a12/ tmp

 	.text
-	.align	4
-	.global	__strncpy_user
-	.type	__strncpy_user,@function
-__strncpy_user:
+ENTRY(__strncpy_user)
+
 	entry	sp, 16		# minimal stack frame
 	# a2/ dst, a3/ src, a4/ len
 	mov	a11, a2		# leave dst in return value register
@@ -202,6 +201,7 @@ EX(10f)	s8i	a9, a11, 0
 	sub	a2, a11, a2	# compute strlen
 	retw

+ENDPROC(__strncpy_user)

 	.section .fixup, "ax"
 	.align	4

arch/xtensa/lib/strnlen_user.S

@@ -11,6 +11,7 @@
  * Copyright (C) 2002 Tensilica Inc.
  */

+#include <linux/linkage.h>
 #include <variant/core.h>
 #include <asm/asmmacro.h>
@@ -42,10 +43,8 @@
 	# a10/ tmp

 	.text
-	.align	4
-	.global	__strnlen_user
-	.type	__strnlen_user,@function
-__strnlen_user:
+ENTRY(__strnlen_user)
+
 	entry	sp, 16		# minimal stack frame
 	# a2/ s, a3/ len
 	addi	a4, a2, -4	# because we overincrement at the end;
@@ -133,6 +132,8 @@ EX(10f)	l32i	a9, a4, 0	# get word with first two bytes of string
 	sub	a2, a4, a2	# subtract to get length
 	retw

+ENDPROC(__strnlen_user)
+
 	.section .fixup, "ax"
 	.align	4
 10:

arch/xtensa/lib/usercopy.S

@@ -53,14 +53,13 @@
  *   a11/ original length
  */

+#include <linux/linkage.h>
 #include <variant/core.h>
 #include <asm/asmmacro.h>

 	.text
-	.align	4
-	.global	__xtensa_copy_user
-	.type	__xtensa_copy_user,@function
-__xtensa_copy_user:
+ENTRY(__xtensa_copy_user)
+
 	entry	sp, 16		# minimal stack frame
 	# a2/ dst, a3/ src, a4/ len
 	mov	a5, a2		# copy dst so that a2 is return value
@@ -267,6 +266,7 @@ EX(10f)	s8i	a6, a5, 0
 	movi	a2, 0		# return success for len bytes copied
 	retw

+ENDPROC(__xtensa_copy_user)

 	.section .fixup, "ax"
 	.align	4