forked from Minki/linux
e85a47106a
For KVM we need to allocate a new context id, but don't really care about all the mm context around it. So let's split the alloc and destroy functions for the context id, so we can grab one without allocating an mm context.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
97 lines
2.2 KiB
C
/*
 * MMU context allocation for 64-bit kernels.
 *
 * Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/module.h>

#include <asm/mmu_context.h>

static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDR(mmu_context_idr);

/*
 * The proto-VSID space has 2^35 - 1 segments available for user mappings.
 * Each segment contains 2^28 bytes. Each context maps 2^44 bytes,
 * so we can support 2^19-1 contexts (19 == 35 + 28 - 44).
 */
#define NO_CONTEXT	0
#define MAX_CONTEXT	((1UL << 19) - 1)

int __init_new_context(void)
{
	int index;
	int err;

again:
	if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > MAX_CONTEXT) {
		spin_lock(&mmu_context_lock);
		idr_remove(&mmu_context_idr, index);
		spin_unlock(&mmu_context_lock);
		return -ENOMEM;
	}

	return index;
}
EXPORT_SYMBOL_GPL(__init_new_context);

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	index = __init_new_context();
	if (index < 0)
		return index;

	/* The old code would re-promote on fork, we don't do that
	 * when using slices as it could cause problem promoting slices
	 * that have been forced down to 4K
	 */
	if (slice_mm_new_context(mm))
		slice_set_user_psize(mm, mmu_virtual_psize);
	mm->context.id = index;

	return 0;
}

void __destroy_context(int context_id)
{
	spin_lock(&mmu_context_lock);
	idr_remove(&mmu_context_idr, context_id);
	spin_unlock(&mmu_context_lock);
}
EXPORT_SYMBOL_GPL(__destroy_context);

void destroy_context(struct mm_struct *mm)
{
	__destroy_context(mm->context.id);
	mm->context.id = NO_CONTEXT;
}
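As an illustration of the split described in the commit message, the sketch below shows how a caller such as KVM might grab a bare context id and release it later without ever allocating an mm context. This is a minimal sketch under assumptions: the demo_* names and the struct are hypothetical and not part of this file; only __init_new_context() and __destroy_context() come from the code above, and their declarations are assumed to be available via asm/mmu_context.h.

/* Hypothetical consumer of the split context-id API; not part of this file. */
#include <linux/errno.h>
#include <asm/mmu_context.h>

struct demo_guest_ctx {
	int context_id;		/* bare proto-VSID context id, no mm_struct behind it */
};

static int demo_guest_ctx_alloc(struct demo_guest_ctx *ctx)
{
	int id;

	id = __init_new_context();	/* allocate an id from the shared IDR */
	if (id < 0)
		return id;		/* propagates -ENOMEM or another IDR error */

	ctx->context_id = id;
	return 0;
}

static void demo_guest_ctx_free(struct demo_guest_ctx *ctx)
{
	__destroy_context(ctx->context_id);	/* return the id to the IDR */
	ctx->context_id = 0;			/* corresponds to NO_CONTEXT above */
}

The design point the sketch leans on: the wrappers init_new_context()/destroy_context() keep the mm_struct bookkeeping (slice page-size setup and mm->context.id), while the __-prefixed variants only touch the IDR, which is exactly the part a non-mm user needs.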