From 049520dcb3966ee0471f2757ca3f5f5e317f33cd Mon Sep 17 00:00:00 2001
From: James Hogan <james.hogan@imgtec.com>
Date: Tue, 4 Apr 2017 07:53:38 +0100
Subject: [PATCH 1/6] metag/usercopy: Reformat rapf loop inline asm

Reformat rapf loop inline assembly to make it more readable and easier
to modify in future.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: linux-metag@vger.kernel.org
---
 arch/metag/lib/usercopy.c | 214 +++++++++++++++-----------------------
 1 file changed, 85 insertions(+), 129 deletions(-)

diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
index 2792fc621088..7abed2f45c83 100644
--- a/arch/metag/lib/usercopy.c
+++ b/arch/metag/lib/usercopy.c
@@ -246,65 +246,49 @@
 #define __asm_copy_user_64bit_rapf_loop(				\
 		to, from, ret, n, id, FIXUP)				\
 	asm volatile (							\
-		".balign 8\n"						\
-		"MOV	RAPF, %1\n"					\
-		"MSETL	[A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n"	\
-		"MOV	D0Ar6, #0\n"					\
-		"LSR	D1Ar5, %3, #6\n"				\
-		"SUB	TXRPT, D1Ar5, #2\n"				\
-		"MOV	RAPF, %1\n"					\
+			".balign 8\n"					\
+		"	MOV	RAPF, %1\n"				\
+		"	MSETL	[A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
+		"	MOV	D0Ar6, #0\n"				\
+		"	LSR	D1Ar5, %3, #6\n"			\
+		"	SUB	TXRPT, D1Ar5, #2\n"			\
+		"	MOV	RAPF, %1\n"				\
 		"$Lloop"id":\n"						\
-		"ADD	RAPF, %1, #64\n"				\
-		"21:\n"							\
-		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"		\
-		"22:\n"							\
-		"MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"		\
-		"23:\n"							\
-		"SUB	%3, %3, #32\n"					\
-		"24:\n"							\
-		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"		\
-		"25:\n"							\
-		"MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"		\
-		"26:\n"							\
-		"SUB	%3, %3, #32\n"					\
-		"DCACHE	[%1+#-64], D0Ar6\n"				\
-		"BR	$Lloop"id"\n"					\
+		"	ADD	RAPF, %1, #64\n"			\
+		"21:	MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
+		"22:	MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"23:	SUB	%3, %3, #32\n"				\
+		"24:	MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
+		"25:	MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"26:	SUB	%3, %3, #32\n"				\
+		"	DCACHE	[%1+#-64], D0Ar6\n"			\
+		"	BR	$Lloop"id"\n"				\
 									\
-		"MOV	RAPF, %1\n"					\
-		"27:\n"							\
-		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"		\
-		"28:\n"							\
-		"MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"		\
-		"29:\n"							\
-		"SUB	%3, %3, #32\n"					\
-		"30:\n"							\
-		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"		\
-		"31:\n"							\
-		"MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"		\
-		"32:\n"							\
-		"SUB	%0, %0, #8\n"					\
-		"33:\n"							\
-		"SETL	[%0++], D0.7, D1.7\n"				\
-		"SUB	%3, %3, #32\n"					\
-		"1:"							\
-		"DCACHE	[%1+#-64], D0Ar6\n"				\
-		"GETL    D0Ar6, D1Ar5, [A0StP+#-40]\n"			\
-		"GETL    D0FrT, D1RtP, [A0StP+#-32]\n"			\
-		"GETL    D0.5, D1.5, [A0StP+#-24]\n"			\
-		"GETL    D0.6, D1.6, [A0StP+#-16]\n"			\
-		"GETL    D0.7, D1.7, [A0StP+#-8]\n"			\
-		"SUB A0StP, A0StP, #40\n"				\
+		"	MOV	RAPF, %1\n"				\
+		"27:	MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
+		"28:	MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"29:	SUB	%3, %3, #32\n"				\
+		"30:	MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
+		"31:	MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"32:	SUB	%0, %0, #8\n"				\
+		"33:	SETL	[%0++], D0.7, D1.7\n"			\
+		"	SUB	%3, %3, #32\n"				\
+		"1:	DCACHE	[%1+#-64], D0Ar6\n"			\
+		"	GETL	D0Ar6, D1Ar5, [A0StP+#-40]\n"		\
+		"	GETL	D0FrT, D1RtP, [A0StP+#-32]\n"		\
+		"	GETL	D0.5, D1.5, [A0StP+#-24]\n"		\
+		"	GETL	D0.6, D1.6, [A0StP+#-16]\n"		\
+		"	GETL	D0.7, D1.7, [A0StP+#-8]\n"		\
+		"	SUB	A0StP, A0StP, #40\n"			\
 		"	.section .fixup,\"ax\"\n"			\
-		"4:\n"							\
-		"	ADD	%0, %0, #8\n"				\
-		"3:\n"							\
-		"	MOV	D0Ar2, TXSTATUS\n"			\
+		"4:	ADD	%0, %0, #8\n"				\
+		"3:	MOV	D0Ar2, TXSTATUS\n"			\
 		"	MOV	D1Ar1, TXSTATUS\n"			\
 		"	AND	D1Ar1, D1Ar1, #0xFFFFF8FF\n"		\
 		"	MOV	TXSTATUS, D1Ar1\n"			\
 			FIXUP						\
-		"	MOVT    D0Ar2,#HI(1b)\n"			\
-		"	JUMP    D0Ar2,#LO(1b)\n"			\
+		"	MOVT	D0Ar2, #HI(1b)\n"			\
+		"	JUMP	D0Ar2, #LO(1b)\n"			\
 		"	.previous\n"					\
 		"	.section __ex_table,\"a\"\n"			\
 		"	.long 21b,3b\n"					\
@@ -397,89 +381,61 @@
 #define __asm_copy_user_32bit_rapf_loop(				\
 			to,	from, ret, n, id, FIXUP)		\
 	asm volatile (							\
-		".balign 8\n"						\
-		"MOV	RAPF, %1\n"					\
-		"MSETL	[A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n"	\
-		"MOV	D0Ar6, #0\n"					\
-		"LSR	D1Ar5, %3, #6\n"				\
-		"SUB	TXRPT, D1Ar5, #2\n"				\
-		"MOV	RAPF, %1\n"					\
-	"$Lloop"id":\n"							\
-		"ADD	RAPF, %1, #64\n"				\
-		"21:\n"							\
-		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"		\
-		"22:\n"							\
-		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"		\
-		"23:\n"							\
-		"SUB	%3, %3, #16\n"					\
-		"24:\n"							\
-		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"		\
-		"25:\n"							\
-		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"		\
-		"26:\n"							\
-		"SUB	%3, %3, #16\n"					\
-		"27:\n"							\
-		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"		\
-		"28:\n"							\
-		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"		\
-		"29:\n"							\
-		"SUB	%3, %3, #16\n"					\
-		"30:\n"							\
-		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"		\
-		"31:\n"							\
-		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"		\
-		"32:\n"							\
-		"SUB	%3, %3, #16\n"					\
-		"DCACHE	[%1+#-64], D0Ar6\n"				\
-		"BR	$Lloop"id"\n"					\
+			".balign 8\n"					\
+		"	MOV	RAPF, %1\n"				\
+		"	MSETL	[A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
+		"	MOV	D0Ar6, #0\n"				\
+		"	LSR	D1Ar5, %3, #6\n"			\
+		"	SUB	TXRPT, D1Ar5, #2\n"			\
+		"	MOV	RAPF, %1\n"				\
+		"$Lloop"id":\n"						\
+		"	ADD	RAPF, %1, #64\n"			\
+		"21:	MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
+		"22:	MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"23:	SUB	%3, %3, #16\n"				\
+		"24:	MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
+		"25:	MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"26:	SUB	%3, %3, #16\n"				\
+		"27:	MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
+		"28:	MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"29:	SUB	%3, %3, #16\n"				\
+		"30:	MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
+		"31:	MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"32:	SUB	%3, %3, #16\n"				\
+		"	DCACHE	[%1+#-64], D0Ar6\n"			\
+		"	BR	$Lloop"id"\n"				\
 									\
-		"MOV	RAPF, %1\n"					\
-		"33:\n"							\
-		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"		\
-		"34:\n"							\
-		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"		\
-		"35:\n"							\
-		"SUB	%3, %3, #16\n"					\
-		"36:\n"							\
-		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"		\
-		"37:\n"							\
-		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"		\
-		"38:\n"							\
-		"SUB	%3, %3, #16\n"					\
-		"39:\n"							\
-		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"		\
-		"40:\n"							\
-		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"		\
-		"41:\n"							\
-		"SUB	%3, %3, #16\n"					\
-		"42:\n"							\
-		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"		\
-		"43:\n"							\
-		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"		\
-		"44:\n"							\
-		"SUB	%0, %0, #4\n"					\
-		"45:\n"							\
-		"SETD	[%0++], D0.7\n"					\
-		"SUB	%3, %3, #16\n"					\
-		"1:"							\
-		"DCACHE	[%1+#-64], D0Ar6\n"				\
-		"GETL    D0Ar6, D1Ar5, [A0StP+#-40]\n"			\
-		"GETL    D0FrT, D1RtP, [A0StP+#-32]\n"			\
-		"GETL    D0.5, D1.5, [A0StP+#-24]\n"			\
-		"GETL    D0.6, D1.6, [A0StP+#-16]\n"			\
-		"GETL    D0.7, D1.7, [A0StP+#-8]\n"			\
-		"SUB A0StP, A0StP, #40\n"				\
+		"	MOV	RAPF, %1\n"				\
+		"33:	MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
+		"34:	MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"35:	SUB	%3, %3, #16\n"				\
+		"36:	MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
+		"37:	MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"38:	SUB	%3, %3, #16\n"				\
+		"39:	MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
+		"40:	MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"41:	SUB	%3, %3, #16\n"				\
+		"42:	MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
+		"43:	MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"44:	SUB	%0, %0, #4\n"				\
+		"45:	SETD	[%0++], D0.7\n"				\
+		"	SUB	%3, %3, #16\n"				\
+		"1:	DCACHE	[%1+#-64], D0Ar6\n"			\
+		"	GETL	D0Ar6, D1Ar5, [A0StP+#-40]\n"		\
+		"	GETL	D0FrT, D1RtP, [A0StP+#-32]\n"		\
+		"	GETL	D0.5, D1.5, [A0StP+#-24]\n"		\
+		"	GETL	D0.6, D1.6, [A0StP+#-16]\n"		\
+		"	GETL	D0.7, D1.7, [A0StP+#-8]\n"		\
+		"	SUB A0StP, A0StP, #40\n"			\
 		"	.section .fixup,\"ax\"\n"			\
-		"4:\n"							\
-		"	ADD		%0, %0, #4\n"			\
-		"3:\n"							\
-		"	MOV	D0Ar2, TXSTATUS\n"			\
+		"4:	ADD	%0, %0, #4\n"				\
+		"3:	MOV	D0Ar2, TXSTATUS\n"			\
 		"	MOV	D1Ar1, TXSTATUS\n"			\
 		"	AND	D1Ar1, D1Ar1, #0xFFFFF8FF\n"		\
 		"	MOV	TXSTATUS, D1Ar1\n"			\
 			FIXUP						\
-		"	MOVT    D0Ar2,#HI(1b)\n"			\
-		"	JUMP    D0Ar2,#LO(1b)\n"			\
+		"	MOVT	D0Ar2, #HI(1b)\n"			\
+		"	JUMP	D0Ar2, #LO(1b)\n"			\
 		"	.previous\n"					\
 		"	.section __ex_table,\"a\"\n"			\
 		"	.long 21b,3b\n"					\

From fc1b759ae4e0f636c56ca8410207a8a36630a96e Mon Sep 17 00:00:00 2001
From: James Hogan <james.hogan@imgtec.com>
Date: Tue, 4 Apr 2017 11:42:35 +0100
Subject: [PATCH 2/6] metag/usercopy: Simplify rapf loop fixup corner case

The final fixup in the rapf loops must handle a corner case due to the
intermediate decrementing of the destination pointer before writing the
last element to it again and re-incrementing it. This decrement (and the
associated increment in the fixup code) can be easily avoided by using
SETL/SETD with an offset of -8/-4.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: linux-metag@vger.kernel.org
---
 arch/metag/lib/usercopy.c | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
index 7abed2f45c83..ceb4590fbca5 100644
--- a/arch/metag/lib/usercopy.c
+++ b/arch/metag/lib/usercopy.c
@@ -270,8 +270,7 @@
 		"29:	SUB	%3, %3, #32\n"				\
 		"30:	MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
 		"31:	MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
-		"32:	SUB	%0, %0, #8\n"				\
-		"33:	SETL	[%0++], D0.7, D1.7\n"			\
+		"32:	SETL	[%0+#-8], D0.7, D1.7\n"			\
 		"	SUB	%3, %3, #32\n"				\
 		"1:	DCACHE	[%1+#-64], D0Ar6\n"			\
 		"	GETL	D0Ar6, D1Ar5, [A0StP+#-40]\n"		\
@@ -281,7 +280,6 @@
 		"	GETL	D0.7, D1.7, [A0StP+#-8]\n"		\
 		"	SUB	A0StP, A0StP, #40\n"			\
 		"	.section .fixup,\"ax\"\n"			\
-		"4:	ADD	%0, %0, #8\n"				\
 		"3:	MOV	D0Ar2, TXSTATUS\n"			\
 		"	MOV	D1Ar1, TXSTATUS\n"			\
 		"	AND	D1Ar1, D1Ar1, #0xFFFFF8FF\n"		\
@@ -303,7 +301,6 @@
 		"	.long 30b,3b\n"					\
 		"	.long 31b,3b\n"					\
 		"	.long 32b,3b\n"					\
-		"	.long 33b,4b\n"					\
 		"	.previous\n"					\
 		: "=r" (to), "=r" (from), "=r" (ret), "=d" (n)		\
 		: "0" (to), "1" (from), "2" (ret), "3" (n)		\
@@ -417,8 +414,7 @@
 		"41:	SUB	%3, %3, #16\n"				\
 		"42:	MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
 		"43:	MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
-		"44:	SUB	%0, %0, #4\n"				\
-		"45:	SETD	[%0++], D0.7\n"				\
+		"44:	SETD	[%0+#-4], D0.7\n"			\
 		"	SUB	%3, %3, #16\n"				\
 		"1:	DCACHE	[%1+#-64], D0Ar6\n"			\
 		"	GETL	D0Ar6, D1Ar5, [A0StP+#-40]\n"		\
@@ -428,7 +424,6 @@
 		"	GETL	D0.7, D1.7, [A0StP+#-8]\n"		\
 		"	SUB A0StP, A0StP, #40\n"			\
 		"	.section .fixup,\"ax\"\n"			\
-		"4:	ADD	%0, %0, #4\n"				\
 		"3:	MOV	D0Ar2, TXSTATUS\n"			\
 		"	MOV	D1Ar1, TXSTATUS\n"			\
 		"	AND	D1Ar1, D1Ar1, #0xFFFFF8FF\n"		\
@@ -462,7 +457,6 @@
 		"	.long 42b,3b\n"					\
 		"	.long 43b,3b\n"					\
 		"	.long 44b,3b\n"					\
-		"	.long 45b,4b\n"					\
 		"	.previous\n"					\
 		: "=r" (to), "=r" (from), "=r" (ret), "=d" (n)		\
 		: "0" (to), "1" (from), "2" (ret), "3" (n)		\

From d3ba2e922d4d1d61806fcb6e09512d2bee734d06 Mon Sep 17 00:00:00 2001
From: James Hogan <james.hogan@imgtec.com>
Date: Fri, 31 Mar 2017 15:40:52 +0100
Subject: [PATCH 3/6] metag/usercopy: Add 64-bit get_user support

Metag already supports 64-bit put_user, so add support for 64-bit
get_user too so that the test_user_copy module can test both.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: linux-metag@vger.kernel.org
---
 arch/metag/include/asm/uaccess.h |  9 +++++++--
 arch/metag/lib/usercopy.c        | 24 ++++++++++++++++++++++++
 2 files changed, 31 insertions(+), 2 deletions(-)

diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
index 07238b39638c..469a2f1393d3 100644
--- a/arch/metag/include/asm/uaccess.h
+++ b/arch/metag/include/asm/uaccess.h
@@ -138,7 +138,8 @@ extern long __get_user_bad(void);
 
 #define __get_user_nocheck(x, ptr, size)			\
 ({                                                              \
-	long __gu_err, __gu_val;                                \
+	long __gu_err;						\
+	long long __gu_val;					\
 	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;             \
 	__gu_err;                                               \
@@ -146,7 +147,8 @@ extern long __get_user_bad(void);
 
 #define __get_user_check(x, ptr, size)					\
 ({                                                                      \
-	long __gu_err = -EFAULT, __gu_val = 0;                          \
+	long __gu_err = -EFAULT;					\
+	long long __gu_val = 0;						\
 	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
 	if (access_ok(VERIFY_READ, __gu_addr, size))			\
 		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
@@ -157,6 +159,7 @@ extern long __get_user_bad(void);
 extern unsigned char __get_user_asm_b(const void __user *addr, long *err);
 extern unsigned short __get_user_asm_w(const void __user *addr, long *err);
 extern unsigned int __get_user_asm_d(const void __user *addr, long *err);
+extern unsigned long long __get_user_asm_l(const void __user *addr, long *err);
 
 #define __get_user_size(x, ptr, size, retval)			\
 do {                                                            \
@@ -168,6 +171,8 @@ do {                                                            \
 		x = __get_user_asm_w(ptr, &retval); break;	\
 	case 4:							\
 		x = __get_user_asm_d(ptr, &retval); break;	\
+	case 8:							\
+		x = __get_user_asm_l(ptr, &retval); break;	\
 	default:						\
 		(x) = __get_user_bad();				\
 	}                                                       \
diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
index ceb4590fbca5..45e7b79eca19 100644
--- a/arch/metag/lib/usercopy.c
+++ b/arch/metag/lib/usercopy.c
@@ -1044,6 +1044,30 @@ unsigned int __get_user_asm_d(const void __user *addr, long *err)
 }
 EXPORT_SYMBOL(__get_user_asm_d);
 
+unsigned long long __get_user_asm_l(const void __user *addr, long *err)
+{
+	register unsigned long long x asm ("D0Re0") = 0;
+	asm volatile (
+		/* on fault the fixup stores -EFAULT in *err; x stays 0 */
+		"1:\n"
+		"	GETL %0,%t0,[%2]\n"
+		"2:\n"
+		"	.section .fixup,\"ax\"\n"
+		"3:	MOV     D0FrT,%3\n"
+		"	SETD    [%1],D0FrT\n"
+		"	MOVT    D0FrT,#HI(2b)\n"
+		"	JUMP    D0FrT,#LO(2b)\n"
+		"	.previous\n"
+		"	.section __ex_table,\"a\"\n"
+		"	.long 1b,3b\n"
+		"	.previous\n"
+		: "=r" (x)
+		: "r" (err), "r" (addr), "P" (-EFAULT)
+		: "D0FrT");
+	return x;
+}
+EXPORT_SYMBOL(__get_user_asm_l);
+
 long __put_user_asm_b(unsigned int x, void __user *addr)
 {
 	register unsigned int err asm ("D0Re0") = 0;

From 8a8b56638bcac4e64cccc88bf95a0f9f4b19a2fb Mon Sep 17 00:00:00 2001
From: James Hogan <james.hogan@imgtec.com>
Date: Fri, 28 Apr 2017 10:50:26 +0100
Subject: [PATCH 4/6] metag/uaccess: Fix access_ok()

The __user_bad() macro used by access_ok() has a few corner cases
noticed by Al Viro where it doesn't behave correctly:

 - The kernel range check has off by 1 errors which permit access to the
   first and last byte of the kernel mapped range.

 - The kernel range check ends at LINCORE_BASE rather than
   META_MEMORY_LIMIT, which is ineffective when the kernel is in global
   space (an extremely uncommon configuration).

There are a couple of other shortcomings here too:

 - Access to the whole of the other address space is permitted (i.e. the
   global half of the address space when the kernel is in local space).
   This isn't ideal as it could theoretically still contain privileged
   mappings set up by the bootloader.

 - The size argument is unused, permitting user copies which start on
   valid pages at the end of the user address range and cross the
   boundary into the kernel address space (e.g. addr = 0x3ffffff0, size
   > 0x10).

It isn't very convenient to add size checks when disallowing certain
regions, and it seems far safer to be sure and explicit about what
userland is able to access, so invert the logic to allow certain regions
instead, and fix the off by 1 errors and missing size checks. This also
allows the get_fs() == KERNEL_DS check to be more easily optimised into
the user address range case.

We now have 3 such allowed regions:

 - The user address range (incorporating the get_fs() == KERNEL_DS
   check).

 - NULL (some kernel code expects this to work, and we'll always catch
   the fault anyway).

 - The core code memory region.

Fixes: 373cd784d0fc ("metag: Memory handling")
Reported-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: linux-metag@vger.kernel.org
Cc: stable@vger.kernel.org
---
 arch/metag/include/asm/uaccess.h | 40 +++++++++++++++++++-------------
 1 file changed, 24 insertions(+), 16 deletions(-)

diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
index 469a2f1393d3..1e5f26d2dce8 100644
--- a/arch/metag/include/asm/uaccess.h
+++ b/arch/metag/include/asm/uaccess.h
@@ -28,24 +28,32 @@
 
 #define segment_eq(a, b)	((a).seg == (b).seg)
 
-#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
-/*
- * Explicitly allow NULL pointers here. Parts of the kernel such
- * as readv/writev use access_ok to validate pointers, but want
- * to allow NULL pointers for various reasons. NULL pointers are
- * safe to allow through because the first page is not mappable on
- * Meta.
- *
- * We also wish to avoid letting user code access the system area
- * and the kernel half of the address space.
- */
-#define __user_bad(addr, size) (((addr) > 0 && (addr) < META_MEMORY_BASE) || \
-				((addr) > PAGE_OFFSET &&		\
-				 (addr) < LINCORE_BASE))
-
 static inline int __access_ok(unsigned long addr, unsigned long size)
 {
-	return __kernel_ok || !__user_bad(addr, size);
+	/*
+	 * Allow access to the user mapped memory area, but not the system area
+	 * before it. The check extends to the top of the address space when
+	 * kernel access is allowed (there's no real reason to user copy to the
+	 * system area in any case).
+	 */
+	if (likely(addr >= META_MEMORY_BASE && addr < get_fs().seg &&
+		   size <= get_fs().seg - addr))
+		return true;
+	/*
+	 * Explicitly allow NULL pointers here. Parts of the kernel such
+	 * as readv/writev use access_ok to validate pointers, but want
+	 * to allow NULL pointers for various reasons. NULL pointers are
+	 * safe to allow through because the first page is not mappable on
+	 * Meta.
+	 */
+	if (!addr)
+		return true;
+	/* Allow access to core code memory area... */
+	if (addr >= LINCORE_CODE_BASE && addr <= LINCORE_CODE_LIMIT &&
+	    size <= LINCORE_CODE_LIMIT + 1 - addr)
+		return true;
+	/* ... but no other areas. */
+	return false;
 }
 
 #define access_ok(type, addr, size) __access_ok((unsigned long)(addr),	\

From 3a158a62da0673db918b53ac1440845a5b64fd90 Mon Sep 17 00:00:00 2001
From: James Hogan <james.hogan@imgtec.com>
Date: Tue, 2 May 2017 19:41:06 +0100
Subject: [PATCH 5/6] metag/uaccess: Check access_ok in strncpy_from_user

The metag implementation of strncpy_from_user() doesn't validate the src
pointer, which could allow reading of arbitrary kernel memory. Add a
short access_ok() check to prevent that.

It's still possible for it to read across the user/kernel boundary, but
it will invariably reach a NUL character after only 9 bytes, leaking
only a static kernel address being loaded into D0Re0 at the beginning of
__start, which is acceptable for the immediate fix.

Reported-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: linux-metag@vger.kernel.org
Cc: stable@vger.kernel.org
---
 arch/metag/include/asm/uaccess.h | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
index 1e5f26d2dce8..500f1be6e0fe 100644
--- a/arch/metag/include/asm/uaccess.h
+++ b/arch/metag/include/asm/uaccess.h
@@ -199,8 +199,13 @@ do {                                                            \
 extern long __must_check __strncpy_from_user(char *dst, const char __user *src,
 					     long count);
 
-#define strncpy_from_user(dst, src, count) __strncpy_from_user(dst, src, count)
-
+static inline long
+strncpy_from_user(char *dst, const char __user *src, long count)
+{
+	if (!access_ok(VERIFY_READ, src, 1))
+		return -EFAULT;
+	return __strncpy_from_user(dst, src, count);
+}
 /*
  * Return the size of a string (including the ending 0)
  *

From e3cd7f013bac3105d44b8bd5a90359989d45b5a5 Mon Sep 17 00:00:00 2001
From: James Hogan <james.hogan@imgtec.com>
Date: Wed, 3 May 2017 09:54:20 +0100
Subject: [PATCH 6/6] metag/mm: Drop pointless increment

The increment of entry in the loop in mmu_init for meta1 is redundant as
it isn't used again, so drop it.

Reported-by: David Binderman <dcb314@hotmail.com>
Signed-off-by: James Hogan <james.hogan@imgtec.com>
---
 arch/metag/mm/mmu-meta1.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/arch/metag/mm/mmu-meta1.c b/arch/metag/mm/mmu-meta1.c
index 91f4255bcb5c..62ebab90924d 100644
--- a/arch/metag/mm/mmu-meta1.c
+++ b/arch/metag/mm/mmu-meta1.c
@@ -152,6 +152,5 @@ void __init mmu_init(unsigned long mem_end)
 
 		p_swapper_pg_dir++;
 		addr += PGDIR_SIZE;
-		entry++;
 	}
 }