Merge branch 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull uaccess unification updates from Al Viro:
"This is the uaccess unification pile. It's _not_ the end of uaccess
work, but the next batch of that will go into the next cycle. This one
mostly takes copy_from_user() and friends out of arch/* and gets the
zero-padding behaviour in sync for all architectures.
Dealing with the nocache/writethrough mess is for the next cycle;
fortunately, that's x86-only. Same for cleanups in iov_iter.c (I am
sold on access_ok() in there, BTW; just not in this pile), same for
reducing __copy_... callsites, strn*... stuff, etc. - there will be a
pile about as large as this one in the next merge window.
This one sat in -next for weeks. -3KLoC"
* 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (96 commits)
HAVE_ARCH_HARDENED_USERCOPY is unconditional now
CONFIG_ARCH_HAS_RAW_COPY_USER is unconditional now
m32r: switch to RAW_COPY_USER
hexagon: switch to RAW_COPY_USER
microblaze: switch to RAW_COPY_USER
get rid of padding, switch to RAW_COPY_USER
ia64: get rid of copy_in_user()
ia64: sanitize __access_ok()
ia64: get rid of 'segment' argument of __do_{get,put}_user()
ia64: get rid of 'segment' argument of __{get,put}_user_check()
ia64: add extable.h
powerpc: get rid of zeroing, switch to RAW_COPY_USER
esas2r: don't open-code memdup_user()
alpha: fix stack smashing in old_adjtimex(2)
don't open-code kernel_setsockopt()
mips: switch to RAW_COPY_USER
mips: get rid of tail-zeroing in primitives
mips: make copy_from_user() zero tail explicitly
mips: clean and reorder the forest of macros...
mips: consolidate __invoke_... wrappers
...
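
The effect of the conversion is easiest to see in the s390 uaccess.h diff below: the architecture now supplies only raw_copy_{from,to}_user(), which return the number of bytes left uncopied and do no zero-padding, while the generic layer on top does the access_ok() check and zeroes the uncopied tail in one place. A minimal sketch of the shape of that generic wrapper (simplified from this cycle's include/linux/uaccess.h; hardened-usercopy checks omitted):

static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned long res = n;

        might_fault();
        if (likely(access_ok(VERIFY_READ, from, n)))
                res = raw_copy_from_user(to, from, n);
        if (unlikely(res))
                /* a fault occurred: zero the uncopied tail so no stale kernel data leaks */
                memset(to + (n - res), 0, res);
        return res;
}

Because the checking and zeroing now live in generic code, the per-architecture kernel-doc blocks and tail-zeroing implementations can simply be deleted, which is where most of the -3KLoC comes from.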
@@ -12,13 +12,9 @@
 /*
  * User space memory access functions
  */
 #include <linux/sched.h>
 #include <linux/errno.h>
 #include <asm/processor.h>
 #include <asm/ctl_reg.h>
-
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
+#include <asm/extable.h>
-
-
 /*
@@ -42,7 +38,7 @@
 static inline void set_fs(mm_segment_t fs)
 {
        current->thread.mm_segment = fs;
-       if (segment_eq(fs, KERNEL_DS)) {
+       if (uaccess_kernel()) {
                set_cpu_flag(CIF_ASCE_SECONDARY);
                __ctl_load(S390_lowcore.kernel_asce, 7, 7);
        } else {
@@ -64,72 +60,14 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
 
 #define access_ok(type, addr, size) __access_ok(addr, size)
 
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
+unsigned long __must_check
+raw_copy_from_user(void *to, const void __user *from, unsigned long n);
 
-struct exception_table_entry
-{
-       int insn, fixup;
-};
+unsigned long __must_check
+raw_copy_to_user(void __user *to, const void *from, unsigned long n);
 
-static inline unsigned long extable_fixup(const struct exception_table_entry *x)
-{
-       return (unsigned long)&x->fixup + x->fixup;
-}
-
-#define ARCH_HAS_RELATIVE_EXTABLE
-
-/**
- * __copy_from_user: - Copy a block of data from user space, with less checking.
- * @to: Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to kernel space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-unsigned long __must_check __copy_from_user(void *to, const void __user *from,
-                                            unsigned long n);
-
-/**
- * __copy_to_user: - Copy a block of data into user space, with less checking.
- * @to: Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from kernel space to user space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-unsigned long __must_check __copy_to_user(void __user *to, const void *from,
-                                          unsigned long n);
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
 
 #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
@@ -218,13 +156,13 @@ static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
 
 static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
 {
-       size = __copy_to_user(ptr, x, size);
+       size = raw_copy_to_user(ptr, x, size);
        return size ? -EFAULT : 0;
 }
 
 static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
 {
-       size = __copy_from_user(x, ptr, size);
+       size = raw_copy_from_user(x, ptr, size);
        return size ? -EFAULT : 0;
 }
 
@@ -314,77 +252,8 @@ int __get_user_bad(void) __attribute__((noreturn));
 #define __put_user_unaligned __put_user
 #define __get_user_unaligned __get_user
 
-extern void __compiletime_error("usercopy buffer size is too small")
-__bad_copy_user(void);
-
-static inline void copy_user_overflow(int size, unsigned long count)
-{
-       WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
-}
-
-/**
- * copy_to_user: - Copy a block of data into user space.
- * @to: Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from kernel space to user space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-static inline unsigned long __must_check
-copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-       might_fault();
-       return __copy_to_user(to, from, n);
-}
-
-/**
- * copy_from_user: - Copy a block of data from user space.
- * @to: Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to kernel space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-static inline unsigned long __must_check
-copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-       unsigned int sz = __compiletime_object_size(to);
-
-       might_fault();
-       if (unlikely(sz != -1 && sz < n)) {
-               if (!__builtin_constant_p(n))
-                       copy_user_overflow(sz, n);
-               else
-                       __bad_copy_user();
-               return n;
-       }
-       return __copy_from_user(to, from, n);
-}
-
 unsigned long __must_check
-__copy_in_user(void __user *to, const void __user *from, unsigned long n);
-
-static inline unsigned long __must_check
-copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
-       might_fault();
-       return __copy_in_user(to, from, n);
-}
+raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
 
 /*
  * Copy a null terminated string from userspace.
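
Aside: the exception-table definitions deleted from uaccess.h above are not lost; they move to the asm/extable.h that this series introduces (compare the "ia64: add extable.h" commit and the new #include in the first hunk). The encoding they implement, advertised by ARCH_HAS_RELATIVE_EXTABLE, stores each entry as two 32-bit offsets relative to the entry itself instead of two absolute addresses, which halves the table size on 64-bit and keeps it position-independent. A sketch of the decoding, matching the helper removed above:

struct exception_table_entry {
        int insn;       /* offset of the faulting instruction, relative to &insn */
        int fixup;      /* offset of the fixup code, relative to &fixup */
};

static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
        /* absolute fixup address = address of the field + stored offset */
        return (unsigned long)&x->fixup + x->fixup;
}

A signed 32-bit offset reaches +/-2 GiB, which suffices because the table and the code it points at live in the same kernel image.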