// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */

#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/suspend.h>
#include <linux/dma-direct.h>

#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/kasan.h>
#include <asm/svm.h>
#include <asm/mmzone.h>

#include <mm/mmu_decl.h>

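/*
 * Upper bound on usable RAM, 0 meaning no limit; normally filled in
 * from the mem= command line option or the device tree during early
 * boot.
 */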
unsigned long long memory_limit;

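/*
 * A page of zeroes, page aligned in BSS, handed out wherever the
 * kernel needs zero-filled memory, e.g. for read faults on anonymous
 * mappings that have not been written yet.
 */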
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

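/*
 * Work out the protection for a userspace mapping of physical memory:
 * defer to the platform hook when one is registered, otherwise map
 * anything that is not RAM non-cached.
 */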
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG
static DEFINE_MUTEX(linear_mapping_mutex);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

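/*
 * MMU flavours that can extend the linear mapping at runtime (e.g. hash
 * and radix on book3s64) override these weak stubs; returning -ENODEV
 * here makes memory hotplug fail cleanly everywhere else.
 */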
int __weak create_section_mapping(unsigned long start, unsigned long end,
				  int nid, pgprot_t prot)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

int __ref arch_create_linear_mapping(int nid, u64 start, u64 size,
				     struct mhp_params *params)
{
	int rc;

	start = (unsigned long)__va(start);
	mutex_lock(&linear_mapping_mutex);
	rc = create_section_mapping(start, start + size, nid,
				    params->pgprot);
	mutex_unlock(&linear_mapping_mutex);
	if (rc) {
		pr_warn("Unable to create linear mapping for 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}
	return 0;
}

void __ref arch_remove_linear_mapping(u64 start, u64 size)
{
	int ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);

	mutex_lock(&linear_mapping_mutex);
	ret = remove_section_mapping(start, start + size);
	mutex_unlock(&linear_mapping_mutex);
	if (ret)
		pr_warn("Unable to remove linear mapping for 0x%llx..0x%llx: %d\n",
			start, start + size, ret);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();
}

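/*
 * Hotplug entry point: create the linear mapping for the new range
 * first, then hand the pages to the core mm, unwinding the mapping if
 * the core rejects them.
 */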
int __ref arch_add_memory(int nid, u64 start, u64 size,
			  struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	rc = arch_create_linear_mapping(nid, start, size, params);
	if (rc)
		return rc;
	rc = __add_pages(nid, start_pfn, nr_pages, params);
	if (rc)
		arch_remove_linear_mapping(start, size);
	return rc;
}

void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	arch_remove_linear_mapping(start, size);
}
#endif

#ifndef CONFIG_NUMA
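/*
 * Without NUMA there is a single node: record the PFN limits and place
 * all of memory on node 0.
 */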
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

void __init initmem_init(void)
{
	sparse_init();
}

/*
 * Mark pages that don't exist as nosave: hibernation snapshots RAM by
 * PFN, so register the holes between memblock ranges as nosave regions
 * to keep them out of the image.
 */
static int __init mark_nonram_nosave(void)
{
	unsigned long spfn, epfn, prev = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &spfn, &epfn, NULL) {
		if (prev && prev < spfn)
			register_nosave_region(prev, spfn);

		prev = epfn;
	}

	return 0;
}
#else /* CONFIG_NUMA */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

/*
 * Zones usage:
 *
 * We set up ZONE_DMA to be 31-bits on all platforms and ZONE_NORMAL to be
 * everything else. GFP_DMA32 page allocations automatically fall back to
 * ZONE_DMA.
 *
 * By using 31-bit unconditionally, we can exploit zone_dma_bits to inform the
 * generic DMA mapping code. 32-bit only devices (if not handled by an IOMMU
 * anyway) will take a first dip into ZONE_NORMAL and get otherwise served by
 * ZONE_DMA.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES];

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_HIGHMEM
	unsigned long v = __fix_to_virt(FIX_KMAP_END);
	unsigned long end = __fix_to_virt(FIX_KMAP_BEGIN);

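	/*
	 * Pre-populate the page tables for the kmap fixmap and PKMAP
	 * areas; the zero mappings below are placeholders so the PTE
	 * pages exist by the time kmap installs real translations.
	 */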
	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */

	map_kernel_page(PKMAP_BASE, 0, __pgprot(0)); /* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

	/*
	 * Allow 30-bit DMA for very limited Broadcom wifi chips on many
	 * powerbooks.
	 */
	if (IS_ENABLED(CONFIG_PPC32))
		zone_dma_bits = 30;
	else
		zone_dma_bits = 31;

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = min(max_low_pfn,
				      1UL << (zone_dma_bits - PAGE_SHIFT));
#endif
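	/*
	 * Example: with 4K pages (PAGE_SHIFT == 12) and zone_dma_bits == 31,
	 * the cap is 1UL << (31 - 12) = 0x80000 PFNs, i.e. ZONE_DMA covers
	 * the first 2GB of RAM.
	 */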
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif

	free_area_init(max_zone_pfns);

	mark_nonram_nosave();
}

void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	/*
	 * Some platforms (e.g. 85xx) limit DMA-able memory way below
	 * 4G. We force memblock to bottom-up mode to ensure that the
	 * memory allocated in swiotlb_init() is DMA-able.
	 * As it's the last memblock allocation, no need to reset it
	 * back to top-down.
	 */
	memblock_set_bottom_up(true);
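	/*
	 * Secure guests cannot let the hypervisor see arbitrary memory,
	 * so they need a swiotlb set up in memory shared with the
	 * hypervisor.
	 */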
	if (is_secure_guest())
		svm_swiotlb_init();
	else
		swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_low_pfn);

	kasan_late_init();

	memblock_free_all();

#ifdef CONFIG_HIGHMEM
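	/*
	 * memblock_free_all() only releases lowmem; release every
	 * non-reserved highmem page to the buddy allocator explicitly.
	 */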
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);

			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If SMP is enabled, next_tlbcam_idx is initialized in the CPU-up
	 * functions... do it here for the non-SMP case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
#ifdef CONFIG_KASAN
	pr_info(" * 0x%08lx..0x%08lx : kasan shadow mem\n",
		KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif
	pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
	if (ioremap_bot != IOREMAP_TOP)
		pr_info(" * 0x%08lx..0x%08lx : early ioremap\n",
			ioremap_bot, IOREMAP_TOP);
	pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#ifdef MODULES_VADDR
	pr_info(" * 0x%08lx..0x%08lx : modules\n",
		MODULES_VADDR, MODULES_END);
#endif
#endif /* CONFIG_PPC32 */
}

void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	free_initmem_default(POISON_FREE_INITMEM);
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (e.g. kdump).
 */
static int __init add_system_ram_resources(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range(i, &start, &end) {
		struct resource *res;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = start;
			/*
			 * In memblock, end points to the first byte after
			 * the range while in resources, end points to the
			 * last byte in the range.
			 */
			res->end = end - 1;
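			/*
			 * e.g. the memblock range [0x0, 0x80000000) becomes
			 * resource 0x00000000-0x7fffffff.
			 */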
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-RAM areas as well; these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */

/*
 * This is defined in kernel/resource.c but only powerpc needs to export it, for
 * the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);