Patch series "mm: consolidate definitions of page table accessors", v2.

The low level page table accessors (pXY_index(), pXY_offset()) are
duplicated across all architectures and sometimes more than once.  For
instance, we have 31 definitions of pgd_offset() for 25 supported
architectures.

Most of these definitions are actually identical and typically they boil
down to, e.g.

	static inline unsigned long pmd_index(unsigned long address)
	{
		return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
	}

	static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
	{
		return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
	}

These definitions can be shared among 90% of the arches provided
XYZ_SHIFT, PTRS_PER_XYZ and xyz_page_vaddr() are defined.

For architectures that really need a custom version there is always the
possibility to override the generic version with the usual ifdef magic.

These patches introduce include/linux/pgtable.h, which replaces
include/asm-generic/pgtable.h, and add the definitions of the page table
accessors to the new header.

This patch (of 12):

The linux/mm.h header includes <asm/pgtable.h> to allow inlining of the
functions involving page table manipulations, e.g. pte_alloc() and
pmd_alloc().  So there is no point in explicitly including
<asm/pgtable.h> in files that already include <linux/mm.h>.

The include statements in such cases are removed with a simple loop:

	for f in $(git grep -l "include <linux/mm.h>") ; do
		sed -i -e '/include <asm\/pgtable.h>/ d' $f
	done

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Cain <bcain@codeaurora.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Guo Ren <guoren@kernel.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ley Foon Tan <ley.foon.tan@intel.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nick Hu <nickhu@andestech.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vincent Chen <deanbo422@gmail.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Link: http://lkml.kernel.org/r/20200514170327.31389-1-rppt@kernel.org
Link: http://lkml.kernel.org/r/20200514170327.31389-2-rppt@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
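A note on the "usual ifdef magic" mentioned in the series description: the
sketch below illustrates that override pattern, under the assumption that the
generic header guards each accessor with a same-named macro; it is not the
literal contents of include/linux/pgtable.h, and arch_pmd_lookup() is a
made-up placeholder.

	/* arch/<arch>/include/asm/pgtable.h: custom version */
	static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
	{
		return arch_pmd_lookup(pud, address);	/* placeholder */
	}
	#define pmd_offset pmd_offset

	/*
	 * include/linux/pgtable.h: generic fallback, compiled out when
	 * the architecture already provided its own pmd_offset above.
	 */
	#ifndef pmd_offset
	static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
	{
		return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
	}
	#endif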
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/sun3/sun3dvma.c
 *
 * Copyright (C) 2000 Sam Creasey
 *
 * Contains common routines for sun3/sun3x DVMA management.
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/list.h>

#include <asm/page.h>
#include <asm/dvma.h>

#undef DVMA_DEBUG

#ifdef CONFIG_SUN3X
extern void dvma_unmap_iommu(unsigned long baddr, int len);
#else
static inline void dvma_unmap_iommu(unsigned long a, int b)
{
}
#endif

#ifdef CONFIG_SUN3
extern void sun3_dvma_init(void);
#endif

static unsigned long *iommu_use;

#define dvma_index(baddr) ((baddr - DVMA_START) >> DVMA_PAGE_SHIFT)

#define dvma_entry_use(baddr) (iommu_use[dvma_index(baddr)])

struct hole {
	unsigned long start;
	unsigned long end;
	unsigned long size;
	struct list_head list;
};

static struct list_head hole_list;
static struct list_head hole_cache;
static struct hole initholes[64];

#ifdef DVMA_DEBUG

static unsigned long dvma_allocs;
static unsigned long dvma_frees;
static unsigned long long dvma_alloc_bytes;
static unsigned long long dvma_free_bytes;

static void print_use(void)
{

	int i;
	int j = 0;

	pr_info("dvma entry usage:\n");

	for(i = 0; i < IOMMU_TOTAL_ENTRIES; i++) {
		if(!iommu_use[i])
			continue;

		j++;

		pr_info("dvma entry: %08x len %08lx\n",
			(i << DVMA_PAGE_SHIFT) + DVMA_START, iommu_use[i]);
	}

	pr_info("%d entries in use total\n", j);

	pr_info("allocation/free calls: %lu/%lu\n", dvma_allocs, dvma_frees);
	pr_info("allocation/free bytes: %Lx/%Lx\n", dvma_alloc_bytes,
		dvma_free_bytes);
}

static void print_holes(struct list_head *holes)
{

	struct list_head *cur;
	struct hole *hole;

	pr_info("listing dvma holes\n");
	list_for_each(cur, holes) {
		hole = list_entry(cur, struct hole, list);

		if((hole->start == 0) && (hole->end == 0) && (hole->size == 0))
			continue;

		pr_info("hole: start %08lx end %08lx size %08lx\n",
			hole->start, hole->end, hole->size);
	}

	pr_info("end of hole listing...\n");
}
#endif /* DVMA_DEBUG */

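/*
 * Try to reclaim struct hole entries by coalescing adjacent free holes
 * on hole_list; each merge returns one struct hole to hole_cache.
 * Returns the number of entries recovered.
 */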
static inline int refill(void)
{

	struct hole *hole;
	struct hole *prev = NULL;
	struct list_head *cur;
	int ret = 0;

	list_for_each(cur, &hole_list) {
		hole = list_entry(cur, struct hole, list);

		if(!prev) {
			prev = hole;
			continue;
		}

		if(hole->end == prev->start) {
			hole->size += prev->size;
			hole->end = prev->end;
			list_move(&(prev->list), &hole_cache);
			ret++;
		}

	}

	return ret;
}

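/*
 * Take a spare struct hole off hole_cache, refilling the cache via
 * refill() if it has run dry.  BUGs if no entry can be recovered.
 */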
static inline struct hole *rmcache(void)
{
	struct hole *ret;

	if(list_empty(&hole_cache)) {
		if(!refill()) {
			pr_crit("out of dvma hole cache!\n");
			BUG();
		}
	}

	ret = list_entry(hole_cache.next, struct hole, list);
	list_del(&(ret->list));

	return ret;

}

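/*
 * Carve 'len' bytes of DVMA bus address space out of the first hole that
 * fits, allocating from the high end of the hole; 'align' is used as a
 * mask below and so is assumed to be a power of two when it exceeds
 * DVMA_PAGE_SIZE.  The allocation length is recorded in the iommu_use
 * map.  BUGs if no hole is large enough.
 */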
static inline unsigned long get_baddr(int len, unsigned long align)
{

	struct list_head *cur;
	struct hole *hole;

	if(list_empty(&hole_list)) {
#ifdef DVMA_DEBUG
		pr_crit("out of dvma holes! (printing hole cache)\n");
		print_holes(&hole_cache);
		print_use();
#endif
		BUG();
	}

	list_for_each(cur, &hole_list) {
		unsigned long newlen;

		hole = list_entry(cur, struct hole, list);

		if(align > DVMA_PAGE_SIZE)
			newlen = len + ((hole->end - len) & (align-1));
		else
			newlen = len;

		if(hole->size > newlen) {
			hole->end -= newlen;
			hole->size -= newlen;
			dvma_entry_use(hole->end) = newlen;
#ifdef DVMA_DEBUG
			dvma_allocs++;
			dvma_alloc_bytes += newlen;
#endif
			return hole->end;
		} else if(hole->size == newlen) {
			list_move(&(hole->list), &hole_cache);
			dvma_entry_use(hole->start) = newlen;
#ifdef DVMA_DEBUG
			dvma_allocs++;
			dvma_alloc_bytes += newlen;
#endif
			return hole->start;
		}

	}

	pr_crit("unable to find dvma hole!\n");
	BUG();
	return 0;
}

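/*
 * Return the allocation at 'baddr' to the hole list: look up its length
 * in the iommu_use map, unmap it from the IOMMU, then either grow an
 * adjacent hole or insert a fresh hole taken from hole_cache.
 */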
static inline int free_baddr(unsigned long baddr)
{

	unsigned long len;
	struct hole *hole;
	struct list_head *cur;
	unsigned long orig_baddr;

	orig_baddr = baddr;
	len = dvma_entry_use(baddr);
	dvma_entry_use(baddr) = 0;
	baddr &= DVMA_PAGE_MASK;
	dvma_unmap_iommu(baddr, len);

#ifdef DVMA_DEBUG
	dvma_frees++;
	dvma_free_bytes += len;
#endif

	list_for_each(cur, &hole_list) {
		hole = list_entry(cur, struct hole, list);

		if(hole->end == baddr) {
			hole->end += len;
			hole->size += len;
			return 0;
		} else if(hole->start == (baddr + len)) {
			hole->start = baddr;
			hole->size += len;
			return 0;
		}

	}

	hole = rmcache();

	hole->start = baddr;
	hole->end = baddr + len;
	hole->size = len;

	// list_add_tail(&(hole->list), cur);
	list_add(&(hole->list), cur);

	return 0;

}

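/*
 * Boot-time setup: seed hole_cache from the static initholes[] pool,
 * publish a single hole covering all of DVMA space and allocate the
 * iommu_use tracking map from memblock.
 */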
void __init dvma_init(void)
{

	struct hole *hole;
	int i;

	INIT_LIST_HEAD(&hole_list);
	INIT_LIST_HEAD(&hole_cache);

	/* prepare the hole cache */
	for(i = 0; i < 64; i++)
		list_add(&(initholes[i].list), &hole_cache);

	hole = rmcache();
	hole->start = DVMA_START;
	hole->end = DVMA_END;
	hole->size = DVMA_SIZE;

	list_add(&(hole->list), &hole_list);

	iommu_use = memblock_alloc(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long),
				   SMP_CACHE_BYTES);
	if (!iommu_use)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      IOMMU_TOTAL_ENTRIES * sizeof(unsigned long));

	dvma_unmap_iommu(DVMA_START, DVMA_SIZE);

#ifdef CONFIG_SUN3
	sun3_dvma_init();
#endif

}

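/*
 * Map 'len' bytes of kernel memory at 'kaddr' into DVMA space with the
 * requested alignment and return the bus address, or 0 on bad input.
 * A zero 'len' defaults to 0x800 bytes; 'align' is rounded up to a
 * multiple of DVMA_PAGE_SIZE.  BUGs if the IOMMU mapping fails.
 */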
unsigned long dvma_map_align(unsigned long kaddr, int len, int align)
{

	unsigned long baddr;
	unsigned long off;

	if(!len)
		len = 0x800;

	if(!kaddr || !len) {
		// pr_err("error: kaddr %lx len %x\n", kaddr, len);
		// *(int *)4 = 0;
		return 0;
	}

	pr_debug("dvma_map request %08x bytes from %08lx\n", len, kaddr);
	off = kaddr & ~DVMA_PAGE_MASK;
	kaddr &= PAGE_MASK;
	len += off;
	len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);

	if(align == 0)
		align = DVMA_PAGE_SIZE;
	else
		align = ((align + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);

	baddr = get_baddr(len, align);
	// pr_info("using baddr %lx\n", baddr);

	if(!dvma_map_iommu(kaddr, baddr, len))
		return (baddr + off);

	pr_crit("dvma_map failed kaddr %lx baddr %lx len %x\n", kaddr, baddr,
		len);
	BUG();
	return 0;
}
EXPORT_SYMBOL(dvma_map_align);

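/*
 * Undo a dvma_map_align() mapping.  Bus addresses with the 0x00f00000
 * bits clear (VME mappings) are first folded back into the DVMA window.
 */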
void dvma_unmap(void *baddr)
{
	unsigned long addr;

	addr = (unsigned long)baddr;
	/* check if this is a vme mapping */
	if(!(addr & 0x00f00000))
		addr |= 0xf00000;

	free_baddr(addr);

	return;

}
EXPORT_SYMBOL(dvma_unmap);

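/*
 * Allocate kernel pages, map them into DVMA space with the requested
 * alignment and make them CPU-accessible.  Returns the DVMA virtual
 * address, or NULL if any allocation or mapping step fails.
 */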
void *dvma_malloc_align(unsigned long len, unsigned long align)
{
	unsigned long kaddr;
	unsigned long baddr;
	unsigned long vaddr;

	if(!len)
		return NULL;

	pr_debug("dvma_malloc request %lx bytes\n", len);
	len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);

	if((kaddr = __get_free_pages(GFP_ATOMIC, get_order(len))) == 0)
		return NULL;

	if((baddr = (unsigned long)dvma_map_align(kaddr, len, align)) == 0) {
		free_pages(kaddr, get_order(len));
		return NULL;
	}

	vaddr = dvma_btov(baddr);

	if(dvma_map_cpu(kaddr, vaddr, len) < 0) {
		dvma_unmap((void *)baddr);
		free_pages(kaddr, get_order(len));
		return NULL;
	}

	pr_debug("mapped %08lx bytes %08lx kern -> %08lx bus\n", len, kaddr,
		 baddr);

	return (void *)vaddr;

}
EXPORT_SYMBOL(dvma_malloc_align);

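/* Freeing DVMA allocations is not implemented; this is a no-op stub. */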
void dvma_free(void *vaddr)
{

	return;

}
EXPORT_SYMBOL(dvma_free);