powerpc/mm/nohash: Remove pte fragment dependency from nohash
Now that we have removed 64K page size support, the RCU page table free can be much simpler for nohash. Make a copy of the RCU callback in the pgalloc.h header, similar to nohash 32. We could possibly merge 32 and 64 bit there, but that is for a later patch. We also move the book3s specific handler to pgtable_book3s64.c; this will be updated in a later patch to handle the split pmd ptlock.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
commit 702346768c
parent 7820856a4f
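The deferred free kept by this diff works by packing the page-table cache index ("shift") into the low, alignment-guaranteed bits of the table pointer before the table is queued with tlb_remove_table(); the callback then masks the two apart again after the grace period. Below is a minimal userspace sketch of that pointer-tagging idea; MAX_INDEX, pack_table() and unpack_and_free() are illustrative stand-ins for MAX_PGTABLE_INDEX_SIZE, pgtable_free_tlb() and __tlb_remove_table(), not kernel APIs.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for MAX_PGTABLE_INDEX_SIZE: must fit in the alignment bits
 * of a table pointer, since the shift is stored in the low bits. */
#define MAX_INDEX 0xf

/* Pack the cache index ("shift") into the low bits of the pointer,
 * the way pgtable_free_tlb() does before calling tlb_remove_table(). */
static void *pack_table(void *table, unsigned int shift)
{
        uintptr_t pgf = (uintptr_t)table;

        assert(shift <= MAX_INDEX);
        assert((pgf & MAX_INDEX) == 0); /* table must be sufficiently aligned */
        return (void *)(pgf | shift);
}

/* Unpack again in the deferred callback, the way __tlb_remove_table() does. */
static void unpack_and_free(void *packed)
{
        void *table = (void *)((uintptr_t)packed & ~(uintptr_t)MAX_INDEX);
        unsigned int shift = (uintptr_t)packed & MAX_INDEX;

        printf("freeing table %p, shift %u\n", table, shift);
        free(table);
}

int main(void)
{
        void *table = aligned_alloc(4096, 4096); /* page-table-sized block */
        void *packed = pack_table(table, 9);

        /* In the kernel this runs only after the RCU grace period. */
        unpack_and_free(packed);
        return 0;
}

The trick relies on the tables being aligned to more than MAX_PGTABLE_INDEX_SIZE bytes, so the low bits of a real table pointer are otherwise zero.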
@@ -84,6 +84,18 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,

#define pmd_pgtable(pmd) pmd_page(pmd)

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
                        pgtable_gfp_flags(mm, GFP_KERNEL));
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
        kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
}

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
{
@@ -118,10 +130,42 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
        __free_page(ptepage);
}

extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
static inline void pgtable_free(void *table, int shift)
{
        if (!shift) {
                pgtable_page_dtor(table);
                free_page((unsigned long)table);
        } else {
                BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
                kmem_cache_free(PGT_CACHE(shift), table);
        }
}

#ifdef CONFIG_SMP
extern void __tlb_remove_table(void *_table);
static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
        unsigned long pgf = (unsigned long)table;

        BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
        pgf |= shift;
        tlb_remove_table(tlb, (void *)pgf);
}

static inline void __tlb_remove_table(void *_table)
{
        void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
        unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

        pgtable_free(table, shift);
}

#else
static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
        pgtable_free(table, shift);
}
#endif

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                  unsigned long address)
{
@@ -129,17 +173,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
        pgtable_free_tlb(tlb, page_address(table), 0);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
                        pgtable_gfp_flags(mm, GFP_KERNEL));
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
        kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
}

#define __pmd_free_tlb(tlb, pmd, addr)          \
        pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX)
#ifndef CONFIG_PPC_64K_PAGES
@@ -225,3 +225,117 @@ void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
        asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
#ifdef CONFIG_PPC_64K_PAGES
static pte_t *get_pte_from_cache(struct mm_struct *mm)
{
        void *pte_frag, *ret;

        spin_lock(&mm->page_table_lock);
        ret = mm->context.pte_frag;
        if (ret) {
                pte_frag = ret + PTE_FRAG_SIZE;
                /*
                 * If we have taken up all the fragments mark PTE page NULL
                 */
                if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
                        pte_frag = NULL;
                mm->context.pte_frag = pte_frag;
        }
        spin_unlock(&mm->page_table_lock);
        return (pte_t *)ret;
}

static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
{
        void *ret = NULL;
        struct page *page;

        if (!kernel) {
                page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
                if (!page)
                        return NULL;
                if (!pgtable_page_ctor(page)) {
                        __free_page(page);
                        return NULL;
                }
        } else {
                page = alloc_page(PGALLOC_GFP);
                if (!page)
                        return NULL;
        }

        ret = page_address(page);
        spin_lock(&mm->page_table_lock);
        /*
         * If we find pgtable_page set, we return
         * the allocated page with single fragment
         * count.
         */
        if (likely(!mm->context.pte_frag)) {
                set_page_count(page, PTE_FRAG_NR);
                mm->context.pte_frag = ret + PTE_FRAG_SIZE;
        }
        spin_unlock(&mm->page_table_lock);

        return (pte_t *)ret;
}

pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
        pte_t *pte;

        pte = get_pte_from_cache(mm);
        if (pte)
                return pte;

        return __alloc_for_ptecache(mm, kernel);
}

#endif /* CONFIG_PPC_64K_PAGES */

void pte_fragment_free(unsigned long *table, int kernel)
{
        struct page *page = virt_to_page(table);

        if (put_page_testzero(page)) {
                if (!kernel)
                        pgtable_page_dtor(page);
                free_unref_page(page);
        }
}

#ifdef CONFIG_SMP
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
        unsigned long pgf = (unsigned long)table;

        BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
        pgf |= shift;
        tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
        void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
        unsigned int shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

        if (!shift)
                /* PTE page needs special handling */
                pte_fragment_free(table, 0);
        else {
                BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
                kmem_cache_free(PGT_CACHE(shift), table);
        }
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
        if (!shift) {
                /* PTE page needs special handling */
                pte_fragment_free(table, 0);
        } else {
                BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
                kmem_cache_free(PGT_CACHE(shift), table);
        }
}
#endif
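The pte_fragment_alloc() path moved above carves one page into PTE_FRAG_NR pieces of PTE_FRAG_SIZE bytes and keeps a per-mm cursor (mm->context.pte_frag) to the next unused piece; once the cursor reaches a page boundary the cached page is dropped and a fresh one is allocated. Below is a simplified, single-threaded userspace sketch of that bookkeeping; FRAG_SIZE, FRAG_NR, struct mm_like and frag_alloc() are made-up stand-ins, and the sketch omits the mm->page_table_lock locking and the page-refcount tracking (set_page_count()/put_page_testzero()) that the real code uses to free the page when its last fragment goes away.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative values only: on the 64K-page configs a 64K page is
 * carved into 16 fragments of 4K each. */
#define FRAG_SIZE       4096UL
#define FRAG_NR         16
#define PAGE_SZ         (FRAG_SIZE * FRAG_NR)

struct mm_like {
        void *pte_frag;         /* next free fragment, like mm->context.pte_frag */
};

/* Hand out the next fragment from the cached page, advancing the cursor;
 * when the cursor wraps to a page boundary the page is fully used. */
static void *frag_alloc(struct mm_like *mm)
{
        void *ret = mm->pte_frag;

        if (ret) {
                void *next = (char *)ret + FRAG_SIZE;

                if (((uintptr_t)next & (PAGE_SZ - 1)) == 0)
                        next = NULL;    /* all fragments taken, drop the cache */
                mm->pte_frag = next;
                return ret;
        }

        /* No cached page: allocate a fresh one and cache the second fragment. */
        ret = aligned_alloc(PAGE_SZ, PAGE_SZ);
        if (!ret)
                return NULL;
        mm->pte_frag = (char *)ret + FRAG_SIZE;
        return ret;
}

int main(void)
{
        struct mm_like mm = { NULL };

        /* Pages are intentionally leaked on exit to keep the sketch short. */
        for (int i = 0; i < 3; i++)
                printf("fragment %d at %p\n", i, frag_alloc(&mm));
        return 0;
}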
@@ -313,120 +313,6 @@ struct page *pmd_page(pmd_t pmd)
        return virt_to_page(pmd_page_vaddr(pmd));
}

#ifdef CONFIG_PPC_64K_PAGES
static pte_t *get_pte_from_cache(struct mm_struct *mm)
{
        void *pte_frag, *ret;

        spin_lock(&mm->page_table_lock);
        ret = mm->context.pte_frag;
        if (ret) {
                pte_frag = ret + PTE_FRAG_SIZE;
                /*
                 * If we have taken up all the fragments mark PTE page NULL
                 */
                if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
                        pte_frag = NULL;
                mm->context.pte_frag = pte_frag;
        }
        spin_unlock(&mm->page_table_lock);
        return (pte_t *)ret;
}

static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
{
        void *ret = NULL;
        struct page *page;

        if (!kernel) {
                page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
                if (!page)
                        return NULL;
                if (!pgtable_page_ctor(page)) {
                        __free_page(page);
                        return NULL;
                }
        } else {
                page = alloc_page(PGALLOC_GFP);
                if (!page)
                        return NULL;
        }

        ret = page_address(page);
        spin_lock(&mm->page_table_lock);
        /*
         * If we find pgtable_page set, we return
         * the allocated page with single fragment
         * count.
         */
        if (likely(!mm->context.pte_frag)) {
                set_page_count(page, PTE_FRAG_NR);
                mm->context.pte_frag = ret + PTE_FRAG_SIZE;
        }
        spin_unlock(&mm->page_table_lock);

        return (pte_t *)ret;
}

pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
        pte_t *pte;

        pte = get_pte_from_cache(mm);
        if (pte)
                return pte;

        return __alloc_for_ptecache(mm, kernel);
}

#endif /* CONFIG_PPC_64K_PAGES */

void pte_fragment_free(unsigned long *table, int kernel)
{
        struct page *page = virt_to_page(table);
        if (put_page_testzero(page)) {
                if (!kernel)
                        pgtable_page_dtor(page);
                free_unref_page(page);
        }
}

#ifdef CONFIG_SMP
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
        unsigned long pgf = (unsigned long)table;

        BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
        pgf |= shift;
        tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
        void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
        unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

        if (!shift)
                /* PTE page needs special handling */
                pte_fragment_free(table, 0);
        else {
                BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
                kmem_cache_free(PGT_CACHE(shift), table);
        }
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
        if (!shift) {
                /* PTE page needs special handling */
                pte_fragment_free(table, 0);
        } else {
                BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
                kmem_cache_free(PGT_CACHE(shift), table);
        }
}
#endif

#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{