#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/prefetch.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

#define __addr_ok(addr)					\
	((unsigned long __force)(addr) <		\
	 (current_thread_info()->addr_limit.seg))

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 *
 * This is equivalent to the following test:
 * (u33)addr + (u33)size >= (u33)current->addr_limit.seg (u65 for x86_64)
 *
 * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
 */

#define __range_not_ok(addr, size)					\
({									\
	unsigned long flag, roksum;					\
	__chk_user_ptr(addr);						\
	asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0"		\
	    : "=&r" (flag), "=r" (roksum)				\
	    : "1" (addr), "g" ((long)(size)),				\
	      "rm" (current_thread_info()->addr_limit.seg));		\
	flag;								\
})

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size)					\
	(likely(__range_not_ok(addr, size) == 0))

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);
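/*
 * Usage sketch (an illustrative addition, not part of the original
 * header): callers typically validate a whole user range once with
 * access_ok() and bail out with -EFAULT before touching it.  The
 * function below is hypothetical.
 *
 *	static long validate_report_buf(void __user *ubuf, unsigned long len)
 *	{
 *		if (!access_ok(VERIFY_WRITE, ubuf, len))
 *			return -EFAULT;
 *		return 0;
 *	}
 *
 * A successful check only proves the range lies below addr_limit; the
 * access itself may still fault, so the transfer routines below carry
 * their own -EFAULT fixup paths.
 */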
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __get_user_x(size, ret, x, ptr)		      \
	asm volatile("call __get_user_" #size	      \
		     : "=a" (ret), "=d" (x)	      \
		     : "0" (ptr))		      \

/* Careful: we have to cast the result to the type of the pointer
 * for sign reasons */

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#ifdef CONFIG_X86_32
#define __get_user_8(__ret_gu, __val_gu, ptr)				\
		__get_user_x(X, __ret_gu, __val_gu, ptr)
#else
#define __get_user_8(__ret_gu, __val_gu, ptr)				\
		__get_user_x(8, __ret_gu, __val_gu, ptr)
#endif

#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	unsigned long __val_gu;						\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_x(1, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 2:								\
		__get_user_x(2, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 4:								\
		__get_user_x(4, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 8:								\
		__get_user_8(__ret_gu, __val_gu, ptr);			\
		break;							\
	default:							\
		__get_user_x(X, __ret_gu, __val_gu, ptr);		\
		break;							\
	}								\
	(x) = (__typeof__(*(ptr)))__val_gu;				\
	__ret_gu;							\
})

#define __put_user_x(size, x, ptr, __ret_pu)			\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")

#ifdef CONFIG_X86_32
#define __put_user_u64(x, addr, err)					\
	asm volatile("1:	movl %%eax,0(%2)\n"			\
		     "2:	movl %%edx,4(%2)\n"			\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	movl %3,%0\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (err)					\
		     : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_u64(x, ptr, retval) \
	__put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT)
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax.  clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
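/*
 * Usage sketch (an illustrative addition, not part of the original
 * header): get_user() performs its own address-limit check, so no
 * prior access_ok() call is needed.  The helper below is hypothetical.
 *
 *	static long read_user_count(int __user *ucount, int *kcount)
 *	{
 *		return get_user(*kcount, ucount);
 *	}
 *
 * Since sizeof(*ucount) == 4, this compiles to "call __get_user_4"
 * with the pointer in %eax; the value comes back in %edx and the
 * error code in %eax.  On failure *kcount is set to zero and the
 * helper returns -EFAULT.
 */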
#ifdef CONFIG_X86_WP_WORKS_OK

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);		\
		break;						\
	default:						\
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__ret_pu;						\
})

#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
		break;							\
	case 8:								\
		__put_user_u64((__typeof__(*ptr))(x), ptr, retval);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#else

#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	__typeof__(*(ptr))__pus_tmp = x;				\
	retval = 0;							\
									\
	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0))	\
		retval = errret;					\
} while (0)

#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr))__pus_tmp = x;			\
	__ret_pu = 0;						\
	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp,		\
				       sizeof(*(ptr))) != 0))	\
		__ret_pu = -EFAULT;				\
	__ret_pu;						\
})
#endif

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)	(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_nocheck(x, ptr, size)			\
({								\
	int __pu_err;						\
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
	__pu_err;						\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	unsigned long __gu_val;						\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__gu_err;							\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("1:	mov"itype" %"rtype"1,%2\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r"(err)					\
		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
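/*
 * Usage sketch (an illustrative addition, not part of the original
 * header): when several values target the same user area, a single
 * access_ok() check can cover the whole range, after which the
 * no-check __get_user()/__put_user() variants documented below
 * suffice.  The structure and function here are hypothetical.
 *
 *	struct upair { int lo; int hi; };
 *
 *	static long write_user_pair(struct upair __user *up, int lo, int hi)
 *	{
 *		if (!access_ok(VERIFY_WRITE, up, sizeof(*up)))
 *			return -EFAULT;
 *		if (__put_user(lo, &up->lo) || __put_user(hi, &up->hi))
 *			return -EFAULT;
 *		return 0;
 *	}
 */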
/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include "uaccess_32.h"
#else
# define ARCH_HAS_SEARCH_EXTABLE
# include "uaccess_64.h"
#endif

#endif /* _ASM_X86_UACCESS_H */