Mirror of https://github.com/torvalds/linux.git, synced 2024-12-02 17:11:33 +00:00
e5080a9677
Every architecture that supports the FLATMEM memory model defines its own version of pfn_valid() that essentially compares a pfn to max_mapnr. Use the mips/powerpc version, implemented as a static inline, as the generic implementation of pfn_valid() and drop its per-architecture definitions.

[rppt@kernel.org: fix the generic pfn_valid()]
Link: https://lkml.kernel.org/r/Y9lg7R1Yd931C+y5@kernel.org
Link: https://lkml.kernel.org/r/20230129124235.209895-5-rppt@kernel.org
Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Guo Ren <guoren@kernel.org> [csky]
Acked-by: Huacai Chen <chenhuacai@loongson.cn> [LoongArch]
Acked-by: Stafford Horne <shorne@gmail.com> [OpenRISC]
Acked-by: Michael Ellerman <mpe@ellerman.id.au> [powerpc]
Reviewed-by: David Hildenbrand <david@redhat.com>
Tested-by: Conor Dooley <conor.dooley@microchip.com>
Cc: Brian Cain <bcain@quicinc.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: Helge Deller <deller@gmx.de>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Vineet Gupta <vgupta@kernel.org>
Cc: WANG Xuerui <kernel@xen0n.name>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
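For context, the per-architecture FLATMEM definitions this commit drops were all variations on the same bounds check against max_mapnr. A representative sketch of the pre-patch shape, illustrative only and not a quote of any particular arch header:

/*
 * Typical arch-specific pfn_valid() before this change (illustrative sketch);
 * the generic static-inline version in the file below replaces it.
 */
#define pfn_valid(pfn)	((unsigned long)(pfn) < max_mapnr)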
include/asm-generic/memory_model.h · 70 lines · 1.7 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_MEMORY_MODEL_H
#define __ASM_MEMORY_MODEL_H

#include <linux/pfn.h>

#ifndef __ASSEMBLY__

/*
 * supports 3 memory models.
 */
#if defined(CONFIG_FLATMEM)

#ifndef ARCH_PFN_OFFSET
#define ARCH_PFN_OFFSET		(0UL)
#endif

#define __pfn_to_page(pfn)	(mem_map + ((pfn) - ARCH_PFN_OFFSET))
#define __page_to_pfn(page)	((unsigned long)((page) - mem_map) + \
				 ARCH_PFN_OFFSET)

#ifndef pfn_valid
static inline int pfn_valid(unsigned long pfn)
{
	/* avoid <linux/mm.h> include hell */
	extern unsigned long max_mapnr;
	unsigned long pfn_offset = ARCH_PFN_OFFSET;

	return pfn >= pfn_offset && (pfn - pfn_offset) < max_mapnr;
}
#define pfn_valid pfn_valid
#endif

#elif defined(CONFIG_SPARSEMEM_VMEMMAP)

/* memmap is virtually contiguous. */
#define __pfn_to_page(pfn)	(vmemmap + (pfn))
#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)

#elif defined(CONFIG_SPARSEMEM)
/*
 * Note: section's mem_map is encoded to reflect its start_pfn.
 * section[i].section_mem_map == mem_map's address - start_pfn;
 */
#define __page_to_pfn(pg)					\
({	const struct page *__pg = (pg);				\
	int __sec = page_to_section(__pg);			\
	(unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec)));	\
})

#define __pfn_to_page(pfn)				\
({	unsigned long __pfn = (pfn);			\
	struct mem_section *__sec = __pfn_to_section(__pfn);	\
	__section_mem_map_addr(__sec) + __pfn;		\
})
#endif /* CONFIG_FLATMEM/SPARSEMEM */

/*
 * Convert a physical address to a Page Frame Number and back
 */
#define	__phys_to_pfn(paddr)	PHYS_PFN(paddr)
#define	__pfn_to_phys(pfn)	PFN_PHYS(pfn)

#define page_to_pfn __page_to_pfn
#define pfn_to_page __pfn_to_page

#endif /* __ASSEMBLY__ */

#endif
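A minimal usage sketch of how the helpers above compose under CONFIG_FLATMEM; it is not part of the header. The function name phys_to_page_checked is made up for illustration, and callers are assumed to reach pfn_valid()/pfn_to_page() through <linux/mm.h> rather than this file directly.

#include <linux/mm.h>

/* Illustrative only: translate a physical address to its struct page, or NULL. */
static struct page *phys_to_page_checked(phys_addr_t paddr)
{
	unsigned long pfn = __phys_to_pfn(paddr);	/* PHYS_PFN(): paddr >> PAGE_SHIFT */

	/* Generic FLATMEM pfn_valid(): pfn must lie in [ARCH_PFN_OFFSET, ARCH_PFN_OFFSET + max_mapnr). */
	if (!pfn_valid(pfn))
		return NULL;

	/* Under FLATMEM this resolves to mem_map + (pfn - ARCH_PFN_OFFSET). */
	return pfn_to_page(pfn);
}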