forked from Minki/linux
87d5986345
Remove set_fs support from s390. With doing this rework address space handling and simplify it. As a result address spaces are now setup like this: CPU running in | %cr1 ASCE | %cr7 ASCE | %cr13 ASCE ----------------------------|-----------|-----------|----------- user space | user | user | kernel kernel, normal execution | kernel | user | kernel kernel, kvm guest execution | gmap | user | kernel To achieve this the getcpu vdso syscall is removed in order to avoid secondary address mode and a separate vdso address space for user space. The getcpu vdso syscall will be implemented differently with a subsequent patch. The kernel accesses user space always via secondary address space. This happens in different ways: - with mvcos in home space mode and directly read/write to secondary address space - with mvcs/mvcp in primary space mode and copy from primary space to secondary space or vice versa - with e.g. cs in secondary space mode and access secondary space Switching translation modes happens with sacf before and after instructions which access user space, like before. Lazy handling of control register reloading is removed in the hope to make everything simpler, but at the cost of making kernel entry and exit a bit slower. That is: on kernel entry the primary asce is always changed to contain the kernel asce, and on kernel exit the primary asce is changed again so it contains the user asce. In kernel mode there is only one exception to the primary asce: when kvm guests are executed the primary asce contains the gmap asce (which describes the guest address space). The primary asce is reset to kernel asce whenever kvm guest execution is interrupted, so that this doesn't have to be taken into account for any user space accesses. Reviewed-by: Sven Schnelle <svens@linux.ibm.com> Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
80 lines
1.9 KiB
C
80 lines
1.9 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef _ASM_S390_FUTEX_H
|
|
#define _ASM_S390_FUTEX_H
|
|
|
|
#include <linux/uaccess.h>
|
|
#include <linux/futex.h>
|
|
#include <asm/mmu_context.h>
|
|
#include <asm/errno.h>
|
|
|
|
/*
 * Interlocked read-modify-write on the user-space futex word at @uaddr.
 *
 * @insn:   assembler instruction(s) computing the new value in %2 from the
 *          old value in %1 and/or the operand in %5
 * @ret:    out: 0 on success, -EFAULT if the user access faulted
 * @oldval: out: value read from *uaddr before the update
 * @newval: scratch register holding the value stored by the cs loop
 *
 * "sacf 256" switches to secondary address space mode so that the l/cs
 * instructions access user space (the kernel accesses user space via the
 * secondary address space on s390); "sacf 768" switches back afterwards.
 * The cs (compare and swap) at label 2 retries via label 3 until the word
 * is updated atomically.  The EX_TABLE entries route faults at labels
 * 0/2/3 to label 4, leaving @ret at its -EFAULT preset (input "0").
 */
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
	asm volatile(							\
		" sacf 256\n"						\
		"0: l %1,0(%6)\n"					\
		"1:"insn						\
		"2: cs %1,%2,0(%6)\n"					\
		"3: jl 1b\n"						\
		" lhi %0,0\n"						\
		"4: sacf 768\n"						\
		EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)		\
		: "=d" (ret), "=&d" (oldval), "=&d" (newval),		\
		  "=m" (*uaddr)						\
		: "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
		  "m" (*uaddr) : "cc");
|
|
|
|
static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
|
|
u32 __user *uaddr)
|
|
{
|
|
int oldval = 0, newval, ret;
|
|
|
|
switch (op) {
|
|
case FUTEX_OP_SET:
|
|
__futex_atomic_op("lr %2,%5\n",
|
|
ret, oldval, newval, uaddr, oparg);
|
|
break;
|
|
case FUTEX_OP_ADD:
|
|
__futex_atomic_op("lr %2,%1\nar %2,%5\n",
|
|
ret, oldval, newval, uaddr, oparg);
|
|
break;
|
|
case FUTEX_OP_OR:
|
|
__futex_atomic_op("lr %2,%1\nor %2,%5\n",
|
|
ret, oldval, newval, uaddr, oparg);
|
|
break;
|
|
case FUTEX_OP_ANDN:
|
|
__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
|
|
ret, oldval, newval, uaddr, oparg);
|
|
break;
|
|
case FUTEX_OP_XOR:
|
|
__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
|
|
ret, oldval, newval, uaddr, oparg);
|
|
break;
|
|
default:
|
|
ret = -ENOSYS;
|
|
}
|
|
|
|
if (!ret)
|
|
*oval = oldval;
|
|
|
|
return ret;
|
|
}
|
|
|
|
/*
 * Atomically compare-and-exchange the user-space futex word at @uaddr:
 * if *uaddr == @oldval, store @newval.  The value actually found at
 * *uaddr is returned through @uval in either case.
 *
 * Returns 0 on success (cs executed without faulting), -EFAULT if the
 * user access faulted.
 *
 * "sacf 256" enters secondary address space mode so that cs operates on
 * user space; "sacf 768" restores the previous mode.  On success the
 * "la %0,0" at label 1 clears the -EFAULT preset in @ret; the EX_TABLE
 * entries route faults at labels 0/1 past that, to label 2.
 */
static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
						u32 oldval, u32 newval)
{
	int ret;

	asm volatile(
		" sacf 256\n"
		"0: cs %1,%4,0(%5)\n"
		"1: la %0,0\n"
		"2: sacf 768\n"
		EX_TABLE(0b,2b) EX_TABLE(1b,2b)
		: "=d" (ret), "+d" (oldval), "=m" (*uaddr)
		: "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
		: "cc", "memory");
	/* cs leaves the current memory value in oldval ("+d" operand) */
	*uval = oldval;
	return ret;
}
|
|
|
|
#endif /* _ASM_S390_FUTEX_H */
|