[ARM] Fix ASID version switch
Close a hole in the ASID version switch, particularly the following scenario:

CPU0 MM PID                     CPU1 MM PID
  idle                            A pid(A)
    A idle(lazy tlb)            * new asid version triggered by B *
  B pid(B)                        A pid(A)
    * MM A gets new asid version *
    A idle(lazy tlb)              A pid(A)
                                * CPU1 doesn't see the new ASID *

The result is that CPU1 continues running with the hardware set for the original (stale) ASID value, but mm->context.id contains the new ASID value. Consequently, the next MM fault on CPU1 updates the page table entries, but flush_tlb_page() fails because it is passed the wrong ASID.

There is a related case in which a threaded application is allocated a new ASID on one CPU while another of its threads is running on a different CPU; that scenario is not fixed by this commit.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 08fdffd4cf
commit 8678c1f042
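Background for the scenario above: mm->context.id packs a software "version" in its upper bits and the 8-bit hardware ASID in the low ASID_BITS, while the CPU's context ID register (and the TLB tags derived from it) only reflect whatever was last programmed. A minimal standalone sketch of that arithmetic — hw_asid() and asid_version() are illustrative helpers, not kernel code:

#include <stdio.h>

#define ASID_BITS          8
#define ASID_MASK          ((~0) << ASID_BITS)
#define ASID_FIRST_VERSION (1 << ASID_BITS)

/* Low ASID_BITS: what the hardware tags TLB entries with. */
static unsigned int hw_asid(unsigned int context_id)
{
	return context_id & ~ASID_MASK;
}

/* Upper bits: the software generation ("version"). */
static unsigned int asid_version(unsigned int context_id)
{
	return context_id & ASID_MASK;
}

int main(void)
{
	unsigned int stale = (1 << ASID_BITS) | 5; /* version 1, hw ASID 5 */
	unsigned int fresh = (2 << ASID_BITS) | 1; /* version 2, hw ASID 1 */

	/*
	 * In the buggy scenario, CPU1 keeps running with hw ASID 5
	 * while mm->context.id now says 1: flush_tlb_page() keyed on
	 * the new value leaves TLB entries tagged with ASID 5 intact.
	 */
	printf("stale hw asid %u vs fresh hw asid %u\n",
	       hw_asid(stale), hw_asid(fresh));
	printf("stale version %#x vs fresh version %#x\n",
	       asid_version(stale), asid_version(fresh));
	return 0;
}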
arch/arm/mm/context.c:

@@ -14,7 +14,8 @@
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 
-unsigned int cpu_last_asid = { 1 << ASID_BITS };
+static DEFINE_SPINLOCK(cpu_asid_lock);
+unsigned int cpu_last_asid = ASID_FIRST_VERSION;
 
 /*
  * We fork()ed a process, and we need a new context for the child
@@ -31,15 +32,16 @@ void __new_context(struct mm_struct *mm)
 {
 	unsigned int asid;
 
+	spin_lock(&cpu_asid_lock);
 	asid = ++cpu_last_asid;
 	if (asid == 0)
-		asid = cpu_last_asid = 1 << ASID_BITS;
+		asid = cpu_last_asid = ASID_FIRST_VERSION;
 
 	/*
 	 * If we've used up all our ASIDs, we need
 	 * to start a new version and flush the TLB.
 	 */
-	if ((asid & ~ASID_MASK) == 0) {
+	if (unlikely((asid & ~ASID_MASK) == 0)) {
+		asid = ++cpu_last_asid;
 		/* set the reserved ASID before flushing the TLB */
 		asm("mcr	p15, 0, %0, c13, c0, 1	@ set reserved context ID\n"
@@ -48,6 +50,8 @@ void __new_context(struct mm_struct *mm)
 		isb();
 		flush_tlb_all();
 	}
+	spin_unlock(&cpu_asid_lock);
 
+	mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id());
 	mm->context.id = asid;
 }
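The allocation logic in __new_context() is easier to follow outside the kernel. Below is a user-space rendering of the same numbering scheme, with the locking, the reserved-context mcr, and flush_tlb_all() reduced to comments — a sketch under those assumptions, not the kernel function:

#define ASID_BITS          8
#define ASID_MASK          ((~0) << ASID_BITS)
#define ASID_FIRST_VERSION (1 << ASID_BITS)

static unsigned int cpu_last_asid = ASID_FIRST_VERSION;

static unsigned int new_context(void)
{
	unsigned int asid;

	/* spin_lock(&cpu_asid_lock) in the kernel */
	asid = ++cpu_last_asid;
	if (asid == 0)	/* full 32-bit wraparound */
		asid = cpu_last_asid = ASID_FIRST_VERSION;

	/*
	 * Low ASID_BITS rolled over to 0: that value is reserved, so
	 * take the next one.  Here the kernel programs the reserved
	 * context ID and runs flush_tlb_all() before anything can run
	 * under the new version.
	 */
	if ((asid & ~ASID_MASK) == 0)
		asid = ++cpu_last_asid;
	/* spin_unlock(&cpu_asid_lock) */

	return asid;
}

Starting cpu_last_asid at ASID_FIRST_VERSION and skipping values whose low bits are zero means hardware ASID 0 is never handed to a task in any version, so it is safe to park the CPU on the reserved context ID while the flush runs.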
include/asm-arm/mmu_context.h:

@@ -36,8 +36,9 @@ void __check_kvm_seq(struct mm_struct *mm);
  * The context ID is used by debuggers and trace logic, and
  * should be unique within all running processes.
  */
-#define ASID_BITS	8
-#define ASID_MASK	((~0) << ASID_BITS)
+#define ASID_BITS		8
+#define ASID_MASK		((~0) << ASID_BITS)
+#define ASID_FIRST_VERSION	(1 << ASID_BITS)
 
 extern unsigned int cpu_last_asid;
 
@@ -96,8 +97,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 #ifdef CONFIG_MMU
 	unsigned int cpu = smp_processor_id();
 
-	if (prev != next) {
-		cpu_set(cpu, next->cpu_vm_mask);
+	if (!cpu_test_and_set(cpu, next->cpu_vm_mask) || prev != next) {
 		check_context(next);
 		cpu_switch_mm(next->pgd, next);
 		if (cache_is_vivt())
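The switch_mm() hunk is the heart of the fix, and it works together with the new mm->cpu_vm_mask assignment in __new_context(): resetting the mask to just the allocating CPU forces every other CPU to fail cpu_test_and_set() on its next switch to that mm — even in the prev == next lazy-tlb case — and therefore to run check_context()/cpu_switch_mm() and pick up the new ASID. A toy model of that handshake, using a plain bitmask in place of the kernel's atomic cpumask ops (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct mm {
	unsigned long cpu_vm_mask;
	unsigned int context_id;
};

/* Models __new_context(): drop every other CPU from the mask so
 * each of them must re-check the context on its next switch. */
static void new_context(struct mm *mm, int allocating_cpu,
			unsigned int new_id)
{
	mm->cpu_vm_mask = 1UL << allocating_cpu;
	mm->context_id = new_id;
}

/* Models the new switch_mm() predicate.  In the kernel the
 * test-and-mark is a single atomic cpu_test_and_set(). */
static bool switch_mm_needs_check(struct mm *next, int cpu,
				  bool prev_ne_next)
{
	bool was_set = next->cpu_vm_mask & (1UL << cpu);

	next->cpu_vm_mask |= 1UL << cpu;
	return !was_set || prev_ne_next;
}

int main(void)
{
	struct mm a = { .cpu_vm_mask = 0x3 };	/* CPUs 0 and 1 run A */

	new_context(&a, 0, 0x201);		/* rollover on CPU0 */

	/* CPU1 resumes A after lazy tlb: prev == next, but its mask
	 * bit was cleared, so the stale ASID is now caught. */
	printf("%d\n", switch_mm_needs_check(&a, 1, false)); /* 1 */
	return 0;
}

Under the old predicate the same call would have returned 0, which is exactly the hole the commit message describes: a lazy-tlb CPU never noticed that its mm had been given a new ASID version elsewhere.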