mirror of
https://github.com/torvalds/linux.git
synced 2024-12-22 10:56:40 +00:00
59f0cb0fdd
As suggested by Andrew Morton, remove memzero() - it's not supported on other architectures so use of it is a potential build breaking bug. Since the compiler optimizes memset(x,0,n) to __memzero() perfectly well, we don't miss out on the underlying benefits of memzero(). Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
100 lines
2.0 KiB
C
/*
|
|
* linux/arch/arm/mm/pgd.c
|
|
*
|
|
* Copyright (C) 1998-2005 Russell King
|
|
*
|
|
* This program is free software; you can redistribute it and/or modify
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
* published by the Free Software Foundation.
|
|
*/
|
|
#include <linux/mm.h>
|
|
#include <linux/highmem.h>
|
|
|
|
#include <asm/pgalloc.h>
|
|
#include <asm/page.h>
|
|
#include <asm/tlbflush.h>
|
|
|
|
#include "mm.h"
|
|
|
|
#define FIRST_KERNEL_PGD_NR (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
|
|
|
|
/*
|
|
* need to get a 16k page for level 1
|
|
*/
|
|
/*
 * Allocate and initialise a new first-level (pgd) table for @mm.
 *
 * Returns the new pgd on success, or NULL if any allocation fails.
 * The caller owns the returned table and must release it with
 * free_pgd_slow().
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	/* order-2 allocation: the 16K needed for an ARM level-1 table */
	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	/* zero the user portion of the table */
	memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
		       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	/* flush the new entries to memory so the hardware walker sees them */
	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		/* copy init_mm's mapping of the vector page into the new table */
		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset_map_nested(init_pmd, 0);
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap_nested(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

	/* error unwind: release resources in reverse order of acquisition */
no_pte:
	pmd_free(mm, new_pmd);
no_pmd:
	free_pages((unsigned long)new_pgd, 2);
no_pgd:
	return NULL;
}
|
|
|
|
/*
 * Release a pgd obtained from get_pgd_slow(), tearing down the
 * first-entry (vector page) pmd/pte that get_pgd_slow() may have
 * set up before freeing the 16K table itself.  A NULL @pgd is a no-op.
 */
void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
{
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = pmd_off(pgd, 0);
	if (pmd_none(*pmd))
		goto free;
	if (pmd_bad(*pmd)) {
		/* corrupt entry: report it, clear it, and still free the pgd */
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

	/* detach the pte page from the pmd before freeing either */
	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
	pmd_free(mm, pmd);
free:
	/* order-2: matches the __get_free_pages(GFP_KERNEL, 2) allocation */
	free_pages((unsigned long) pgd, 2);
}
|