x86: fix SMAP in 32-bit environments
In commit 11f1a4b975 ("x86: reorganize SMAP handling in user space accesses") I changed how the stac/clac instructions were generated around the user space accesses, which then made it possible to do batched accesses efficiently for user string copies etc.

However, in doing so, I completely spaced out, and didn't even think about the 32-bit case. And nobody really even seemed to notice, because SMAP doesn't even exist until modern Skylake processors, and you'd have to be crazy to run 32-bit kernels on a modern CPU.
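For context, this is the shape of the pattern that commit introduced. A minimal sketch, assuming the definitions in <asm/uaccess.h> and <asm/smap.h> at the time; do_many_raw_user_accesses() is a hypothetical stand-in for a batched copy loop:

    /*
     * On SMAP-capable CPUs, __uaccess_begin()/__uaccess_end() patch in
     * stac/clac via the alternatives mechanism; on older CPUs they are
     * no-ops.
     */
    #define __uaccess_begin() stac()  /* set EFLAGS.AC: permit user accesses */
    #define __uaccess_end()   clac()  /* clear EFLAGS.AC: forbid them again */

    /*
     * The payoff: one stac/clac pair brackets a whole run of raw accesses,
     * instead of every single access toggling EFLAGS.AC on its own.
     */
    __uaccess_begin();
    do_many_raw_user_accesses(dst, src, n);  /* hypothetical batched copy */
    __uaccess_end();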
Which brings us to Andy Lutomirski.

He actually tested the 32-bit kernel on new hardware, and noticed that it doesn't work. My bad. The trivial fix is to add the required uaccess begin/end markers around the raw accesses in <asm/uaccess_32.h>.
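Concretely, each constant-size case goes from a bare raw access to one bracketed by the markers. Taking the one-byte store case from the diff below (the surrounding code is unchanged):

    /* before: raw access with no SMAP window opened on 32-bit */
    case 1:
            __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret, 1);
            return ret;

    /* after: the same access, bracketed by the SMAP markers */
    case 1:
            __uaccess_begin();
            __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret, 1);
            __uaccess_end();
            return ret;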
I feel a bit bad about this patch, just because that header file really should be cleaned up to avoid all the duplicated code in it, and this commit just expands on the problem. But this just fixes the bug without any bigger cleanup surgery.
Reported-and-tested-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit de9e478b9d
parent 4de8ebeff8
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -48,20 +48,28 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 
 		switch (n) {
 		case 1:
+			__uaccess_begin();
 			__put_user_size(*(u8 *)from, (u8 __user *)to,
 					1, ret, 1);
+			__uaccess_end();
 			return ret;
 		case 2:
+			__uaccess_begin();
 			__put_user_size(*(u16 *)from, (u16 __user *)to,
 					2, ret, 2);
+			__uaccess_end();
 			return ret;
 		case 4:
+			__uaccess_begin();
 			__put_user_size(*(u32 *)from, (u32 __user *)to,
 					4, ret, 4);
+			__uaccess_end();
 			return ret;
 		case 8:
+			__uaccess_begin();
 			__put_user_size(*(u64 *)from, (u64 __user *)to,
 					8, ret, 8);
+			__uaccess_end();
 			return ret;
 		}
 	}
@@ -103,13 +111,19 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 
 		switch (n) {
 		case 1:
+			__uaccess_begin();
 			__get_user_size(*(u8 *)to, from, 1, ret, 1);
+			__uaccess_end();
 			return ret;
 		case 2:
+			__uaccess_begin();
 			__get_user_size(*(u16 *)to, from, 2, ret, 2);
+			__uaccess_end();
 			return ret;
 		case 4:
+			__uaccess_begin();
 			__get_user_size(*(u32 *)to, from, 4, ret, 4);
+			__uaccess_end();
 			return ret;
 		}
 	}
@@ -148,13 +162,19 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
 
 		switch (n) {
 		case 1:
+			__uaccess_begin();
 			__get_user_size(*(u8 *)to, from, 1, ret, 1);
+			__uaccess_end();
 			return ret;
 		case 2:
+			__uaccess_begin();
 			__get_user_size(*(u16 *)to, from, 2, ret, 2);
+			__uaccess_end();
 			return ret;
 		case 4:
+			__uaccess_begin();
 			__get_user_size(*(u32 *)to, from, 4, ret, 4);
+			__uaccess_end();
 			return ret;
 		}
 	}
@@ -170,13 +190,19 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
 
 		switch (n) {
 		case 1:
+			__uaccess_begin();
 			__get_user_size(*(u8 *)to, from, 1, ret, 1);
+			__uaccess_end();
 			return ret;
 		case 2:
+			__uaccess_begin();
 			__get_user_size(*(u16 *)to, from, 2, ret, 2);
+			__uaccess_end();
 			return ret;
 		case 4:
+			__uaccess_begin();
 			__get_user_size(*(u32 *)to, from, 4, ret, 4);
+			__uaccess_end();
 			return ret;
 		}
 	}