commit b5466f8728
ASIDs are allocated to MMU contexts based on a rolling counter. This means that after 255 allocations we must invalidate all existing ASIDs via an expensive IPI mechanism to synchronise all of the online CPUs and ensure that all tasks execute with an ASID from the new generation.

This patch changes the rollover behaviour so that we rely instead on the hardware broadcasting of the TLB invalidation to avoid the IPI calls. This works by keeping track of the active ASID on each core, which is then reserved in the case of a rollover so that currently scheduled tasks can continue to run. For cores without hardware TLB broadcasting, we keep track of pending flushes in a cpumask, so cores can flush their local TLB before scheduling a new mm.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Tested-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
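To make the scheme concrete, below is a minimal user-space sketch of the generation-based allocation the message describes: the upper bits of the 64-bit context id carry a generation counter, the low ASID_BITS carry the ASID, ASIDs that were active on a CPU at rollover are reserved, and a per-CPU pending-flush flag stands in for the cpumask. All names here (asid_generation, active_asids, flush_pending, NR_CPUS_SIM) and the single-threaded structure are illustrative assumptions, not the kernel's actual code; locking, the IPI-free hardware TLB broadcast, and scheduler integration are omitted.

/*
 * Illustrative sketch only -- not the kernel implementation.
 * Models generation-based ASID allocation with rollover,
 * reserved active ASIDs and per-CPU deferred TLB flushes.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ASID_BITS   8
#define NUM_ASIDS   (1ULL << ASID_BITS)
#define ASID_MASK   ((~0ULL) << ASID_BITS)
#define NR_CPUS_SIM 4                        /* hypothetical CPU count */

static uint64_t asid_generation = NUM_ASIDS; /* high bits = generation */
static uint64_t next_asid = 1;               /* ASID 0 is never handed out */
static uint64_t active_asids[NR_CPUS_SIM];   /* ASID live on each CPU */
static bool     flush_pending[NR_CPUS_SIM];  /* stand-in for the cpumask */

/* An ASID is reserved if some CPU was running it when we rolled over. */
static bool asid_reserved(uint64_t asid)
{
	for (int cpu = 0; cpu < NR_CPUS_SIM; cpu++)
		if (active_asids[cpu] == asid)
			return true;
	return false;
}

/* Roll over: bump the generation and mark every CPU for a local flush. */
static void new_generation(void)
{
	asid_generation += NUM_ASIDS;
	next_asid = 1;
	for (int cpu = 0; cpu < NR_CPUS_SIM; cpu++)
		flush_pending[cpu] = true;
}

/* Give an mm a context id valid in the current generation. */
static uint64_t check_and_switch_context(uint64_t mm_id, int cpu)
{
	if ((mm_id ^ asid_generation) >> ASID_BITS) {
		/* Stale generation: pick a fresh ASID, skipping any that
		 * are reserved for tasks still running on other CPUs. */
		uint64_t asid;
		do {
			if (next_asid == NUM_ASIDS)
				new_generation();
			asid = next_asid++;
		} while (asid_reserved(asid));
		mm_id = asid_generation | asid;
	}

	if (flush_pending[cpu]) {
		/* local_flush_tlb_all() would run here in the kernel */
		flush_pending[cpu] = false;
	}

	active_asids[cpu] = mm_id & ~ASID_MASK;
	return mm_id;
}

int main(void)
{
	uint64_t mm_a = 0, mm_b = 0; /* two fresh address spaces */

	mm_a = check_and_switch_context(mm_a, 0);
	mm_b = check_and_switch_context(mm_b, 1);
	printf("mm_a asid=%llu mm_b asid=%llu\n",
	       (unsigned long long)(mm_a & ~ASID_MASK),
	       (unsigned long long)(mm_b & ~ASID_MASK));
	return 0;
}

The key property the sketch preserves is that a rollover never stops a currently scheduled task: its ASID stays reserved, and each CPU flushes its own TLB lazily the next time it switches context, rather than being interrupted by an IPI.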
35 lines · 561 B · C
#ifndef __ARM_MMU_H
#define __ARM_MMU_H

#ifdef CONFIG_MMU

typedef struct {
#ifdef CONFIG_CPU_HAS_ASID
	u64 id;
#endif
	unsigned int kvm_seq;
} mm_context_t;

#ifdef CONFIG_CPU_HAS_ASID
#define ASID_BITS	8
#define ASID_MASK	((~0ULL) << ASID_BITS)
#define ASID(mm)	((mm)->context.id & ~ASID_MASK)
#else
#define ASID(mm)	(0)
#endif

#else

/*
 * From nommu.h:
 *  Copyright (C) 2002, David McCullough <davidm@snapgear.com>
 *  modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com>
 */
typedef struct {
	unsigned long	end_brk;
} mm_context_t;

#endif

#endif
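For the CONFIG_CPU_HAS_ASID case above, a standalone check (hypothetical, purely to illustrate the macros) shows how a 64-bit context id decomposes: ASID_MASK clears the low ASID_BITS, so ASID(mm) yields the low 8 bits while the remaining upper bits hold the rollover generation.

#include <stdint.h>
#include <stdio.h>

#define ASID_BITS 8
#define ASID_MASK ((~0ULL) << ASID_BITS)

int main(void)
{
	/* Hypothetical id: generation 3 in the high bits, ASID 42 low. */
	uint64_t id = (3ULL << ASID_BITS) | 42;

	printf("asid       = %llu\n", (unsigned long long)(id & ~ASID_MASK));
	printf("generation = %llu\n", (unsigned long long)(id >> ASID_BITS));
	return 0;
}

This prints asid = 42 and generation = 3: the same id comparison the commit relies on to detect that a task's ASID belongs to an old generation and must be refreshed.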