/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/spu.h>

#define NUM_LOW_AREAS	(0x100000000UL >> SID_SHIFT)
#define NUM_HIGH_AREAS	(PGTABLE_RANGE >> HTLB_AREA_SHIFT)

#ifdef CONFIG_PPC_64K_PAGES
#define HUGEPTE_INDEX_SIZE	(PMD_SHIFT-HPAGE_SHIFT)
#else
#define HUGEPTE_INDEX_SIZE	(PUD_SHIFT-HPAGE_SHIFT)
#endif
#define PTRS_PER_HUGEPTE	(1 << HUGEPTE_INDEX_SIZE)
#define HUGEPTE_TABLE_SIZE	(sizeof(pte_t) << HUGEPTE_INDEX_SIZE)

#define HUGEPD_SHIFT	(HPAGE_SHIFT + HUGEPTE_INDEX_SIZE)
#define HUGEPD_SIZE	(1UL << HUGEPD_SHIFT)
#define HUGEPD_MASK	(~(HUGEPD_SIZE-1))

#define huge_pgtable_cache	(pgtable_cache[HUGEPTE_CACHE_NUM])

/* Flag to mark huge PD pointers.  This means pmd_bad() and pud_bad()
 * will choke on pointers to hugepte tables, which is handy for
 * catching screwups early. */
#define HUGEPD_OK	0x1

typedef struct { unsigned long pd; } hugepd_t;

#define hugepd_none(hpd)	((hpd).pd == 0)

static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!(hpd.pd & HUGEPD_OK));
	return (pte_t *)(hpd.pd & ~HUGEPD_OK);
}

static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr)
{
	unsigned long idx = ((addr >> HPAGE_SHIFT) & (PTRS_PER_HUGEPTE-1));
	pte_t *dir = hugepd_page(*hpdp);

	return dir + idx;
}
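
/* Allocate a fresh hugepte table and hang it off *hpdp.  If another
 * thread installed a table while we were allocating, free ours and
 * keep theirs; mm->page_table_lock serialises the update. */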
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address)
{
	pte_t *new = kmem_cache_alloc(huge_pgtable_cache,
				      GFP_KERNEL|__GFP_REPEAT);

	if (! new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
	if (!hugepd_none(*hpdp))
		kmem_cache_free(huge_pgtable_cache, new);
	else
		hpdp->pd = (unsigned long)new | HUGEPD_OK;
	spin_unlock(&mm->page_table_lock);
	return 0;
}

/* Modelled after find_linux_pte() */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pg;
	pud_t *pu;

	BUG_ON(! in_hugepage_area(mm->context, addr));

	addr &= HPAGE_MASK;

	pg = pgd_offset(mm, addr);
	if (!pgd_none(*pg)) {
		pu = pud_offset(pg, addr);
		if (!pud_none(*pu)) {
#ifdef CONFIG_PPC_64K_PAGES
			pmd_t *pm;
			pm = pmd_offset(pu, addr);
			if (!pmd_none(*pm))
				return hugepte_offset((hugepd_t *)pm, addr);
#else
			return hugepte_offset((hugepd_t *)pu, addr);
#endif
		}
	}

	return NULL;
}
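
/* Find the hugepte slot for @addr, allocating the intermediate page
 * table levels and the hugepte table itself if necessary.  Returns
 * NULL if any allocation fails. */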
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pg;
	pud_t *pu;
	hugepd_t *hpdp = NULL;

	BUG_ON(! in_hugepage_area(mm->context, addr));

	addr &= HPAGE_MASK;

	pg = pgd_offset(mm, addr);
	pu = pud_alloc(mm, pg, addr);

	if (pu) {
#ifdef CONFIG_PPC_64K_PAGES
		pmd_t *pm;
		pm = pmd_alloc(mm, pu, addr);
		if (pm)
			hpdp = (hugepd_t *)pm;
#else
		hpdp = (hugepd_t *)pu;
#endif
	}

	if (! hpdp)
		return NULL;

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr))
		return NULL;

	return hugepte_offset(hpdp, addr);
}
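
/* Huge page PMDs are not shared on powerpc, so there is never anything
 * to unshare here. */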
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
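
/* Detach the hugepte table from *hpdp and queue it for freeing via the
 * mmu_gather batch. */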
static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp)
{
	pte_t *hugepte = hugepd_page(*hpdp);

	hpdp->pd = 0;
	tlb->need_flush = 1;
	pgtable_free_tlb(tlb, pgtable_free_cache(hugepte, HUGEPTE_CACHE_NUM,
						 PGF_CACHENUM_MASK));
}
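
/* Free the hugepte tables covered by a PMD/PUD range, mirroring the
 * floor/ceiling logic of the normal free_pmd_range()/free_pud_range(). */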
#ifdef CONFIG_PPC_64K_PAGES
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		free_hugepte_range(tlb, (hugepd_t *)pmd);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd);
}
#endif

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
#ifdef CONFIG_PPC_64K_PAGES
		if (pud_none_or_clear_bad(pud))
			continue;
		hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling);
#else
		if (pud_none(*pud))
			continue;
		free_hugepte_range(tlb, (hugepd_t *)pud);
#endif
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather **tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start;

	/*
	 * Comments below are taken from the normal free_pgd_range().  They
	 * apply here too.  The tests against HUGEPD_MASK below are
	 * essential, because we *don't* test for this at the bottom
	 * level.  Without them we'll attempt to free a hugepte table
	 * when we unmap just part of it, even if there are other
	 * active mappings using it.
	 *
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing HUGEPD* at this top level?  Because
	 * often there will be no work to do at all, and we'd prefer
	 * not to go all the way down to the bottom just to discover
	 * that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we
	 * must be careful to reject "the opposite 0" before it
	 * confuses the subsequent tests.  But what about where end is
	 * brought down by HUGEPD_SIZE below?  No, end can't go down to
	 * 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */

	addr &= HUGEPD_MASK;
	if (addr < floor) {
		addr += HUGEPD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= HUGEPD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= HUGEPD_SIZE;
	if (addr > end - 1)
		return;

	start = addr;
	pgd = pgd_offset((*tlb)->mm, addr);
	do {
		BUG_ON(! in_hugepage_area((*tlb)->mm->context, addr));
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		hugetlb_free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	if (pte_present(*ptep)) {
		/* We open-code pte_clear because we need to pass the right
		 * argument to hpte_need_flush (huge / !huge).  Might not be
		 * necessary anymore if we make hpte_need_flush() get the
		 * page size from the slices
		 */
		pte_update(mm, addr & HPAGE_MASK, ptep, ~0UL, 1);
	}
	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
	return __pte(old);
}

struct slb_flush_info {
	struct mm_struct *mm;
	u16 newareas;
};
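
/* IPI handler: throw away the SLB entries for any newly-opened low
 * (256M) areas on this CPU, but only if it is running the mm that was
 * changed. */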
static void flush_low_segments(void *parm)
{
	struct slb_flush_info *fi = parm;
	unsigned long i;

	BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_LOW_AREAS);

	/* Only need to do anything if this CPU is working in the same
	 * mm as the one which has changed */
	if (current->active_mm != fi->mm)
		return;

	/* update the paca copy of the context struct */
	get_paca()->context = current->active_mm->context;

	asm volatile("isync" : : : "memory");
	for (i = 0; i < NUM_LOW_AREAS; i++) {
		if (! (fi->newareas & (1U << i)))
			continue;
		asm volatile("slbie %0"
			     : : "r" ((i << SID_SHIFT) | SLBIE_C));
	}
	asm volatile("isync" : : : "memory");
}

static void flush_high_segments(void *parm)
{
	struct slb_flush_info *fi = parm;
	unsigned long i, j;

	BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_HIGH_AREAS);

	/* Only need to do anything if this CPU is working in the same
	 * mm as the one which has changed */
	if (current->active_mm != fi->mm)
		return;

	/* update the paca copy of the context struct */
	get_paca()->context = current->active_mm->context;

	asm volatile("isync" : : : "memory");
	for (i = 0; i < NUM_HIGH_AREAS; i++) {
		if (! (fi->newareas & (1U << i)))
			continue;
		for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
			asm volatile("slbie %0"
				     :: "r" (((i << HTLB_AREA_SHIFT)
					      + (j << SID_SHIFT)) | SLBIE_C));
	}
	asm volatile("isync" : : : "memory");
}
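
/* A low (256M) area can only be converted to hugepage use if no normal
 * VMAs already live in it. */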
static int prepare_low_area_for_htlb(struct mm_struct *mm, unsigned long area)
{
	unsigned long start = area << SID_SHIFT;
	unsigned long end = (area+1) << SID_SHIFT;
	struct vm_area_struct *vma;

	BUG_ON(area >= NUM_LOW_AREAS);

	/* Check no VMAs are in the region */
	vma = find_vma(mm, start);
	if (vma && (vma->vm_start < end))
		return -EBUSY;

	return 0;
}

static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
{
	unsigned long start = area << HTLB_AREA_SHIFT;
	unsigned long end = (area+1) << HTLB_AREA_SHIFT;
	struct vm_area_struct *vma;

	BUG_ON(area >= NUM_HIGH_AREAS);

	/* Hack, so that each address is controlled by exactly one
	 * of the high or low area bitmaps, the first high area starts
	 * at 4GB, not 0 */
	if (start == 0)
		start = 0x100000000UL;

	/* Check no VMAs are in the region */
	vma = find_vma(mm, start);
	if (vma && (vma->vm_start < end))
		return -EBUSY;

	return 0;
}
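
/* Mark additional low areas as available for hugepages: check each
 * requested area is free, update the context bitmap, then flush stale
 * SLB entries on every CPU running this mm. */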
static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
{
	unsigned long i;
	struct slb_flush_info fi;

	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS);
	BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS);

	newareas &= ~(mm->context.low_htlb_areas);
	if (! newareas)
		return 0; /* The segments we want are already open */

	for (i = 0; i < NUM_LOW_AREAS; i++)
		if ((1 << i) & newareas)
			if (prepare_low_area_for_htlb(mm, i) != 0)
				return -EBUSY;

	mm->context.low_htlb_areas |= newareas;

	/* the context change must make it to memory before the flush,
	 * so that further SLB misses do the right thing. */
	mb();

	fi.mm = mm;
	fi.newareas = newareas;
	on_each_cpu(flush_low_segments, &fi, 0, 1);

	return 0;
}

static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
{
	struct slb_flush_info fi;
	unsigned long i;

	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS);
	BUILD_BUG_ON((sizeof(mm->context.high_htlb_areas)*8)
		     != NUM_HIGH_AREAS);

	newareas &= ~(mm->context.high_htlb_areas);
	if (! newareas)
		return 0; /* The areas we want are already open */

	for (i = 0; i < NUM_HIGH_AREAS; i++)
		if ((1 << i) & newareas)
			if (prepare_high_area_for_htlb(mm, i) != 0)
				return -EBUSY;

	mm->context.high_htlb_areas |= newareas;

	/* the context change must make it to memory before the flush,
	 * so that further SLB misses do the right thing. */
	mb();

	fi.mm = mm;
	fi.newareas = newareas;
	on_each_cpu(flush_high_segments, &fi, 0, 1);

	return 0;
}
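
/* Validate the alignment of a prospective hugepage mapping and open the
 * low/high areas it touches.  Also used for the MAP_FIXED case in
 * hugetlb_get_unmapped_area() below. */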
int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
{
	int err = 0;

	if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
		return -EINVAL;
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;

	if (addr < 0x100000000UL)
		err = open_low_hpage_areas(current->mm,
					   LOW_ESID_MASK(addr, len));
	if ((addr + len) > 0x100000000UL)
		err = open_high_hpage_areas(current->mm,
					    HTLB_AREA_MASK(addr, len));
#ifdef CONFIG_SPE_BASE
	spu_flush_all_slbs(current->mm);
#endif
	if (err) {
		printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)"
		       " failed (lowmask: 0x%04hx, highmask: 0x%04hx)\n",
		       addr, len,
		       LOW_ESID_MASK(addr, len), HTLB_AREA_MASK(addr, len));
		return err;
	}

	return 0;
}
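
/* Translate a hugepage address to its struct page; only valid inside
 * this mm's hugepage regions. */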
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;

	if (! in_hugepage_area(mm->context, address))
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, address);
	page = pte_page(*ptep);
	if (page)
		page += (address % HPAGE_SIZE) / PAGE_SIZE;

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}

/* Because we have an exclusive hugepage region which lies within the
 * normal user address space, we have to take special measures to make
 * non-huge mmap()s evade the hugepage reserved regions. */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (len > TASK_SIZE)
		return -ENOMEM;

	/* handle fixed mapping: prevent overlap with huge pages */
	if (flags & MAP_FIXED) {
		if (is_hugepage_only_range(mm, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (((TASK_SIZE - len) >= addr)
		    && (!vma || (addr+len) <= vma->vm_start)
		    && !is_hugepage_only_range(mm, addr, len))
			return addr;
	}
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	vma = find_vma(mm, addr);
	while (TASK_SIZE - len >= addr) {
		BUG_ON(vma && (addr >= vma->vm_end));

		if (touches_hugepage_low_range(mm, addr, len)) {
			addr = ALIGN(addr+1, 1<<SID_SHIFT);
			vma = find_vma(mm, addr);
			continue;
		}
		if (touches_hugepage_high_range(mm, addr, len)) {
			addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
			vma = find_vma(mm, addr);
			continue;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
		vma = vma->vm_next;
	}

	/* Make sure we didn't miss any holes */
	if (start_addr != TASK_UNMAPPED_BASE) {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
		goto full_search;
	}
	return -ENOMEM;
}

/*
 * This mmap-allocator allocates new areas top-down from below the
 * stack's low limit (the base):
 *
 * Because we have an exclusive hugepage region which lies within the
 * normal user address space, we have to take special measures to make
 * non-huge mmap()s evade the hugepage reserved regions.
 */
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	unsigned long base = mm->mmap_base, addr = addr0;
	unsigned long largest_hole = mm->cached_hole_size;
	int first_time = 1;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	/* handle fixed mapping: prevent overlap with huge pages */
	if (flags & MAP_FIXED) {
		if (is_hugepage_only_range(mm, addr, len))
			return -EINVAL;
		return addr;
	}

	/* dont allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start)
		    && !is_hugepage_only_range(mm, addr, len))
			return addr;
	}

	if (len <= largest_hole) {
		largest_hole = 0;
		mm->free_area_cache = base;
	}
try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or cant fit in requested address hole */
	addr = (mm->free_area_cache - len) & PAGE_MASK;
	do {
hugepage_recheck:
		if (touches_hugepage_low_range(mm, addr, len)) {
			addr = (addr & ((~0) << SID_SHIFT)) - len;
			goto hugepage_recheck;
		} else if (touches_hugepage_high_range(mm, addr, len)) {
			addr = (addr & ((~0UL) << HTLB_AREA_SHIFT)) - len;
			goto hugepage_recheck;
		}

		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		if (addr+len <= vma->vm_start &&
		    (!prev_vma || (addr >= prev_vma->vm_end))) {
			/* remember the address as a hint for next time */
			mm->cached_hole_size = largest_hole;
			return (mm->free_area_cache = addr);
		} else {
			/* pull free_area_cache down to the first hole */
			if (mm->free_area_cache == vma->vm_end) {
				mm->free_area_cache = vma->vm_start;
				mm->cached_hole_size = largest_hole;
			}
		}

		/* remember the largest hole we saw so far */
		if (addr + largest_hole < vma->vm_start)
			largest_hole = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
	} while (len <= vma->vm_start);

fail:
	/*
	 * if hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (first_time) {
		mm->free_area_cache = base;
		largest_hole = 0;
		first_time = 0;
		goto try_again;
	}
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
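
/* Can the hint address accommodate a mapping of @len bytes without
 * running past TASK_SIZE or colliding with an existing VMA? */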
static int htlb_check_hinted_area(unsigned long addr, unsigned long len)
{
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	if (TASK_SIZE - len >= addr &&
	    (!vma || ((addr + len) <= vma->vm_start)))
		return 0;

	return -ENOMEM;
}
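
/* Scan the low 4GB for a free range of @len bytes that lies entirely
 * within the areas allowed by @segmask. */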
static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
{
	unsigned long addr = 0;
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	while (addr + len <= 0x100000000UL) {
		BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */

		if (! __within_hugepage_low_range(addr, len, segmask)) {
			addr = ALIGN(addr+1, 1<<SID_SHIFT);
			vma = find_vma(current->mm, addr);
			continue;
		}

		if (!vma || (addr + len) <= vma->vm_start)
			return addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
		/* Depending on segmask this might not be a confirmed
		 * hugepage region, so the ALIGN could have skipped
		 * some VMAs */
		vma = find_vma(current->mm, addr);
	}

	return -ENOMEM;
}

static unsigned long htlb_get_high_area(unsigned long len, u16 areamask)
{
	unsigned long addr = 0x100000000UL;
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	while (addr + len <= TASK_SIZE_USER64) {
		BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */

		if (! __within_hugepage_high_range(addr, len, areamask)) {
			addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
			vma = find_vma(current->mm, addr);
			continue;
		}

		if (!vma || (addr + len) <= vma->vm_start)
			return addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
		/* Depending on areamask this might not be a confirmed
		 * hugepage region, so the ALIGN could have skipped
		 * some VMAs */
		vma = find_vma(current->mm, addr);
	}

	return -ENOMEM;
}
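
/* Pick an address for a new hugepage mapping: try the caller's hint,
 * then the already-open areas, and finally open additional low or high
 * areas as needed. */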
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	int lastshift;
	u16 areamask, curareas;

	if (HPAGE_SHIFT == 0)
		return -EINVAL;
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
		return -EINVAL;

	/* Paranoia, caller should have dealt with this */
	BUG_ON((addr + len) < addr);

	/* Handle MAP_FIXED */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(addr, len, pgoff))
			return -EINVAL;
		return addr;
	}

	if (test_thread_flag(TIF_32BIT)) {
		curareas = current->mm->context.low_htlb_areas;

		/* First see if we can use the hint address */
		if (addr && (htlb_check_hinted_area(addr, len) == 0)) {
			areamask = LOW_ESID_MASK(addr, len);
			if (open_low_hpage_areas(current->mm, areamask) == 0)
				return addr;
		}

		/* Next see if we can map in the existing low areas */
		addr = htlb_get_low_area(len, curareas);
		if (addr != -ENOMEM)
			return addr;

		/* Finally go looking for areas to open */
		lastshift = 0;
		for (areamask = LOW_ESID_MASK(0x100000000UL-len, len);
		     ! lastshift; areamask >>=1) {
			if (areamask & 1)
				lastshift = 1;

			addr = htlb_get_low_area(len, curareas | areamask);
			if ((addr != -ENOMEM)
			    && open_low_hpage_areas(current->mm, areamask) == 0)
				return addr;
		}
	} else {
		curareas = current->mm->context.high_htlb_areas;

		/* First see if we can use the hint address */
		/* We discourage 64-bit processes from doing hugepage
		 * mappings below 4GB (must use MAP_FIXED) */
		if ((addr >= 0x100000000UL)
		    && (htlb_check_hinted_area(addr, len) == 0)) {
			areamask = HTLB_AREA_MASK(addr, len);
			if (open_high_hpage_areas(current->mm, areamask) == 0)
				return addr;
		}

		/* Next see if we can map in the existing high areas */
		addr = htlb_get_high_area(len, curareas);
		if (addr != -ENOMEM)
			return addr;

		/* Finally go looking for areas to open */
		lastshift = 0;
		for (areamask = HTLB_AREA_MASK(TASK_SIZE_USER64-len, len);
		     ! lastshift; areamask >>=1) {
			if (areamask & 1)
				lastshift = 1;

			addr = htlb_get_high_area(len, curareas | areamask);
			if ((addr != -ENOMEM)
			    && open_high_hpage_areas(current->mm, areamask) == 0)
				return addr;
		}
	}
	printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open"
	       " enough areas\n");
	return -ENOMEM;
}

/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
						  pte_t pte, int trap)
{
	struct page *page;
	int i;

	if (!pfn_valid(pte_pfn(pte)))
		return rflags;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++)
				__flush_dcache_icache(page_address(page+i));
			set_bit(PG_arch_1, &page->flags);
		} else {
			rflags |= HPTE_R_N;
		}
	}
	return rflags;
}
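
/* Hash-fault handling for huge pages: look up the Linux PTE for @ea and
 * build or update the corresponding HPTE.  Returns 0 on success, 1 to
 * send the problem up to do_page_fault(). */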
int hash_huge_page(struct mm_struct *mm, unsigned long access,
		   unsigned long ea, unsigned long vsid, int local,
		   unsigned long trap)
{
	pte_t *ptep;
	unsigned long old_pte, new_pte;
	unsigned long va, rflags, pa;
	long slot;
	int err = 1;

	ptep = huge_pte_offset(mm, ea);

	/* Search the Linux page table for a match with va */
	va = (vsid << 28) | (ea & 0x0fffffff);

	/*
	 * If no pte found or not present, send the problem up to
	 * do_page_fault
	 */
	if (unlikely(!ptep || pte_none(*ptep)))
		goto out;

	/*
	 * Check the user's access rights to the page.  If access should be
	 * prevented then send the problem up to do_page_fault.
	 */
	if (unlikely(access & ~pte_val(*ptep)))
		goto out;
	/*
	 * At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE. There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is
	 *	the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE. The
	 *	current values of the pp bits in the HPTE prevent access
	 *	because we are doing software DIRTY bit management and the
	 *	page is currently not DIRTY.
	 */

	do {
		old_pte = pte_val(*ptep);
		if (old_pte & _PAGE_BUSY)
			goto out;
		new_pte = old_pte | _PAGE_BUSY |
			_PAGE_ACCESSED | _PAGE_HASHPTE;
	} while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
					 old_pte, new_pte));

	rflags = 0x2 | (!(new_pte & _PAGE_RW));
	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		/* No CPU has hugepages but lacks no execute, so we
		 * don't need to worry about that case */
		rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
						       trap);

	/* Check if pte already has an hpte (case 2) */
	if (unlikely(old_pte & _PAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long hash, slot;

		hash = hpt_hash(va, HPAGE_SHIFT);
		if (old_pte & _PAGE_F_SECOND)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += (old_pte & _PAGE_F_GIX) >> 12;

		if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_huge_psize,
					 local) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}

	if (likely(!(old_pte & _PAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(va, HPAGE_SHIFT);
		unsigned long hpte_group;

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

repeat:
		hpte_group = ((hash & htab_hash_mask) *
			      HPTES_PER_GROUP) & ~0x7UL;

		/* clear HPTE slot information in new PTE */
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;

		/* Add in WIMG bits */
		/* XXX We should store these in the pte */
		/* --BenH: I think they are ... */
		rflags |= _PAGE_COHERENT;

		/* Insert into the hash table, primary slot */
		slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
					  mmu_huge_psize);

		/* Primary is full, try the secondary */
		if (unlikely(slot == -1)) {
			hpte_group = ((~hash & htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL;
			slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
						  HPTE_V_SECONDARY,
						  mmu_huge_psize);
			if (slot == -1) {
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_hash_mask) *
						      HPTES_PER_GROUP)&~0x7UL;

				ppc_md.hpte_remove(hpte_group);
				goto repeat;
			}
		}

		if (unlikely(slot == -2))
			panic("hash_huge_page: pte_insert failed\n");

		new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
	}

	/*
	 * No need to use ldarx/stdcx here
	 */
	*ptep = __pte(new_pte & ~_PAGE_BUSY);

	err = 0;

 out:
	return err;
}

static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
{
	memset(addr, 0, kmem_cache_size(cache));
}
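
/* Create the kmem cache used for hugepte tables; hugepage support is
 * only available when the CPU supports 16M pages. */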
static int __init hugetlbpage_init(void)
{
	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
		return -ENODEV;

	huge_pgtable_cache = kmem_cache_create("hugepte_cache",
					       HUGEPTE_TABLE_SIZE,
					       HUGEPTE_TABLE_SIZE,
					       0,
					       zero_ctor, NULL);
	if (! huge_pgtable_cache)
		panic("hugetlbpage_init(): could not create hugepte cache\n");

	return 0;
}

module_init(hugetlbpage_init);