8a4e3a9ead
mm->context.id is updated under asid_lock when a new ASID is allocated to an mm_struct. However, it is also read without the lock when a task is being scheduled and checking whether or not the current ASID generation is up-to-date.

If two threads of the same process are being scheduled in parallel and the bottom bits of the generation in their mm->context.id match the current generation (that is, the mm_struct has not been used for ~2^24 rollovers), then the non-atomic, lockless access to mm->context.id may yield the incorrect ASID.

This patch fixes the issue by making mm->context.id an atomic64_t, ensuring that the generation is always read consistently. For code that only requires access to the ASID bits (e.g. TLB flushing by mm), the value is accessed directly, which GCC converts to an ldrb.

Cc: <stable@vger.kernel.org> # 3.8
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
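The pattern the patch relies on can be illustrated with a minimal userspace sketch (not kernel code): the 64-bit context id packs the generation in the upper bits and the hardware ASID in the low ASID_BITS, and the scheduler fast path reads the whole id with a single atomic 64-bit load so the two fields can never be observed out of sync. The names ctx_generation, ctx_lock, next_asid and check_and_switch below are made up for illustration, and generation rollover handling is omitted; only the single-atomic-read/single-atomic-write handling of the id mirrors the patch.

/*
 * Userspace sketch of the lockless generation check made safe by using
 * an atomic 64-bit context id. Illustrative only; not kernel code.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <pthread.h>

#define ASID_BITS	8
#define ASID_MASK	((~0ULL) << ASID_BITS)

struct mm_context {
	_Atomic uint64_t id;	/* (generation << ASID_BITS) | asid */
};

static _Atomic uint64_t ctx_generation = 1ULL << ASID_BITS;
static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t next_asid = 1;

static void check_and_switch(struct mm_context *ctx)
{
	/* Lockless fast path: one atomic load of the whole 64-bit id. */
	uint64_t id = atomic_load(&ctx->id);

	if (!((id ^ atomic_load(&ctx_generation)) >> ASID_BITS))
		return;		/* generation is current: ASID bits are valid */

	/* Slow path: allocate a fresh ASID under the lock (rollover omitted). */
	pthread_mutex_lock(&ctx_lock);
	id = atomic_load(&ctx_generation) | (next_asid++ & ~ASID_MASK);
	atomic_store(&ctx->id, id);	/* single consistent update */
	pthread_mutex_unlock(&ctx_lock);
}

int main(void)
{
	struct mm_context ctx = { .id = 0 };

	check_and_switch(&ctx);
	printf("id=%#llx asid=%llu\n",
	       (unsigned long long)atomic_load(&ctx.id),
	       (unsigned long long)(atomic_load(&ctx.id) & ~ASID_MASK));
	return 0;
}

Build with something like cc -std=c11 -pthread sketch.c. The header below (arch/arm/include/asm/mmu.h) shows mm_context_t and the ASID macros after the change.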
#ifndef __ARM_MMU_H
#define __ARM_MMU_H

#ifdef CONFIG_MMU

typedef struct {
#ifdef CONFIG_CPU_HAS_ASID
	atomic64_t	id;
#endif
	unsigned int	vmalloc_seq;
} mm_context_t;

#ifdef CONFIG_CPU_HAS_ASID
#define ASID_BITS	8
#define ASID_MASK	((~0ULL) << ASID_BITS)
#define ASID(mm)	((mm)->context.id.counter & ~ASID_MASK)
#else
#define ASID(mm)	(0)
#endif

#else

/*
 * From nommu.h:
 *  Copyright (C) 2002, David McCullough <davidm@snapgear.com>
 *  modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com>
 */
typedef struct {
	unsigned long	end_brk;
} mm_context_t;

#endif

#endif
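To make the macro arithmetic concrete: with ASID_BITS == 8, ASID_MASK is ~0ULL << 8, i.e. 0xffffffffffffff00, so ~ASID_MASK keeps only the low eight bits of the 64-bit id, which hold the hardware ASID, while the generation lives in the bits above. The tiny standalone program below (not kernel code, with a made-up example id) just replays that arithmetic.

/* Standalone check of the ASID macro arithmetic above; not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define ASID_BITS	8
#define ASID_MASK	((~0ULL) << ASID_BITS)	/* 0xffffffffffffff00 */

int main(void)
{
	/* Hypothetical context id: generation 3, hardware ASID 0x2a. */
	uint64_t id = (3ULL << ASID_BITS) | 0x2a;

	/* Equivalent of ASID(mm): keep only the low ASID_BITS bits. */
	printf("asid = %#llx\n", (unsigned long long)(id & ~ASID_MASK));	/* prints 0x2a */
	return 0;
}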