forked from Minki/linux
723cacbd9d
There is a race with multi-threaded applications between context switch and pagetable upgrade. In switch_mm() a new user_asce is built from mm->pgd and mm->context.asce_bits, w/o holding any locks. A concurrent mmap with a pagetable upgrade on another thread in crst_table_upgrade() could already have set new asce_bits, but not yet the new mm->pgd. This would result in a corrupt user_asce in switch_mm(), and eventually in a kernel panic from a translation exception. Fix this by storing the complete asce instead of just the asce_bits, which can then be read atomically from switch_mm(), so that it either sees the old value or the new value, but no mixture. Both cases are OK. Having the old value would result in a page fault on access to the higher level memory, but the fault handler would see the new mm->pgd, if it was a valid access after the mmap on the other thread has completed. So as worst-case scenario we would have a page fault loop for the racing thread until the next time slice. Also remove dead code and simplify the upgrade/downgrade path, there are no upgrades from 2 levels, and only downgrades from 3 levels for compat tasks. There are also no concurrent upgrades, because the mmap_sem is held with down_write() in do_mmap, so the flush and table checks during upgrade can be removed. Reported-by: Michael Munday <munday@ca.ibm.com> Reviewed-by: Martin Schwidefsky <schwidefsky@de.ibm.com> Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com> Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
45 lines
1.0 KiB
C
45 lines
1.0 KiB
C
#ifndef __MMU_H
|
|
#define __MMU_H
|
|
|
|
#include <linux/cpumask.h>
|
|
#include <linux/errno.h>
|
|
|
|
/*
 * Architecture-specific per-mm MMU state (embedded as mm->context).
 */
typedef struct {
	/* CPUs this mm is currently attached to — NOTE(review): presumably
	 * used as the TLB-flush target set; confirm against flush code. */
	cpumask_t cpu_attach_mask;
	/* Number of attachers of this context. */
	atomic_t attach_count;
	/* Non-zero when a flush of this mm is pending — TODO confirm
	 * against the s390 TLB-flush implementation. */
	unsigned int flush_mm;
	/* Protects pgtable_list and gmap_list below. */
	spinlock_t list_lock;
	/* Page-table fragments belonging to this mm. */
	struct list_head pgtable_list;
	/* Guest address-space (gmap) structures attached to this mm. */
	struct list_head gmap_list;
	/*
	 * Complete address-space-control element (table origin + asce
	 * bits), stored as one word so switch_mm() can read it atomically
	 * and never observe a mix of old bits with a new pgd (see the
	 * pagetable-upgrade race fixed by this change).
	 */
	unsigned long asce;
	/* Upper limit of the address space covered by the page tables. */
	unsigned long asce_limit;
	/* Base address of the vdso mapping for this mm. */
	unsigned long vdso_base;
	/* The mmu context allocates 4K page tables. */
	unsigned int alloc_pgste:1;
	/* The mmu context uses extended page tables. */
	unsigned int has_pgste:1;
	/* The mmu context uses storage keys. */
	unsigned int use_skey:1;
} mm_context_t;
|
|
|
|
/*
 * Static initializer for the mm_context_t of a statically defined mm
 * (e.g. init_mm): sets up the spinlock and the two list heads; all
 * remaining fields rely on zero-initialization of the static object.
 */
#define INIT_MM_CONTEXT(name) \
	.context.list_lock = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \
	.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
	.context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
|
|
|
|
/*
 * tprot - test protection of an address via the TPROT instruction.
 * @addr: address to test.
 *
 * Issues TPROT on @addr and extracts the resulting condition code
 * (ipm + srl 28), so the return value is the cc (0-3) on success.
 * If TPROT itself raises an exception, the EX_TABLE fixup branches
 * from label 0 to label 1, skipping the cc extraction, and rc keeps
 * its initial value of -EFAULT.
 *
 * Return: TPROT condition code 0-3, or -EFAULT if the instruction
 * faulted.
 */
static inline int tprot(unsigned long addr)
{
	/* Default result if the instruction faults (fixup skips the ipm). */
	int rc = -EFAULT;

	asm volatile(
		" tprot 0(%1),0\n"
		"0: ipm %0\n"
		" srl %0,28\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (rc) : "a" (addr) : "cc");
	return rc;
}
|
|
|
|
#endif
|