mirror of
https://github.com/torvalds/linux.git
synced 2024-12-29 06:12:08 +00:00
523402fa91
We currently attempt to check whether a physical address range provided to __ioremap() may be in use by the page allocator by examining the value of PageReserved for each page in the region - lowmem pages not marked reserved are presumed to be in use by the page allocator, and requests to ioremap them fail. The way we check this has been broken since commit 92923ca3aa
("mm: meminit: only set page reserved in the memblock region"), because memblock will typically not have any knowledge of non-RAM pages and therefore those pages will not have the PageReserved flag set. Thus when we attempt to ioremap a region outside of RAM we incorrectly fail, believing that the region is RAM that may be in use. In most cases ioremap() on MIPS will take a fast-path to use the unmapped kseg1 or xkphys virtual address spaces and never hit this path, so the only way to hit it is for a MIPS32 system to attempt to ioremap() an address range in lowmem with flags other than _CACHE_UNCACHED. Perhaps the most straightforward way to do this is using ioremap_uncached_accelerated(), which is how the problem was discovered. Fix this by making use of walk_system_ram_range() to test the address range provided to __ioremap() against only RAM pages, rather than all lowmem pages. This means that if we have a lowmem I/O region, which is very common for MIPS systems, we're free to ioremap() address ranges within it. A nice bonus is that the test is no longer limited to lowmem. The approach here matches the way x86 performed the same test after commit c81c8a1eee
("x86, ioremap: Speed up check for RAM pages") until x86 moved towards a slightly more complicated check using walk_mem_res() for unrelated reasons with commit 0e4c12b45a
("x86/mm, resource: Use PAGE_KERNEL protection for ioremap of memory pages"). Signed-off-by: Paul Burton <paul.burton@mips.com> Reported-by: Serge Semin <fancer.lancer@gmail.com> Tested-by: Serge Semin <fancer.lancer@gmail.com> Fixes: 92923ca3aa
("mm: meminit: only set page reserved in the memblock region") Cc: James Hogan <jhogan@kernel.org> Cc: Ralf Baechle <ralf@linux-mips.org> Cc: linux-mips@linux-mips.org Cc: stable@vger.kernel.org # v4.2+ Patchwork: https://patchwork.linux-mips.org/patch/19786/
207 lines
5.0 KiB
C
207 lines
5.0 KiB
C
/*
|
|
* This file is subject to the terms and conditions of the GNU General Public
|
|
* License. See the file "COPYING" in the main directory of this archive
|
|
* for more details.
|
|
*
|
|
* (C) Copyright 1995 1996 Linus Torvalds
|
|
* (C) Copyright 2001, 2002 Ralf Baechle
|
|
*/
|
|
#include <linux/export.h>
|
|
#include <asm/addrspace.h>
|
|
#include <asm/byteorder.h>
|
|
#include <linux/ioport.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/vmalloc.h>
|
|
#include <linux/mm_types.h>
|
|
#include <asm/cacheflush.h>
|
|
#include <asm/io.h>
|
|
#include <asm/tlbflush.h>
|
|
|
|
/*
 * Fill one PTE table with mappings for [phys_addr, phys_addr + size),
 * starting at virtual offset 'address' within the enclosing PMD region.
 * 'flags' supplies the caller's cacheability bits. BUGs if any target
 * slot is already populated.
 */
static inline void remap_area_pte(pte_t * pte, unsigned long address,
	phys_addr_t size, phys_addr_t phys_addr, unsigned long flags)
{
	phys_addr_t end;
	unsigned long pfn;
	/* Kernel-global, present, readable & writeable, plus caller's cache flags */
	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
				   | __WRITEABLE | flags);

	/* Reduce 'address' to its offset within the enclosing PMD region */
	address &= ~PMD_MASK;
	end = address + size;
	/* Clamp to this PTE table; the caller walks any further PMD entries */
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	BUG_ON(address >= end);
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		/* Overwriting a live entry would silently clobber a mapping */
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}
|
|
|
|
/*
 * Populate the PMD entries covering [address, address + size) within one
 * PGD region, allocating PTE tables as needed and delegating the actual
 * PTE fill to remap_area_pte(). Returns 0 on success or -ENOMEM if a PTE
 * table cannot be allocated.
 */
static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
	phys_addr_t size, phys_addr_t phys_addr, unsigned long flags)
{
	phys_addr_t end;

	/* Reduce 'address' to its offset within the enclosing PGD region */
	address &= ~PGDIR_MASK;
	end = address + size;
	/* Clamp to this PGD region; the caller walks any further PGD entries */
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	/* Bias so that (address + phys_addr) below yields the physical address */
	phys_addr -= address;
	BUG_ON(address >= end);
	do {
		pte_t * pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		/* Advance to the start of the next PMD-sized region */
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}
|
|
|
|
/*
 * Create kernel (init_mm) page table mappings so that virtual range
 * [address, address + size) maps to physical [phys_addr, ...) with the
 * given cache flags, walking one PGD entry at a time.
 *
 * Returns 0 on success or -ENOMEM on allocation failure. Entries created
 * before a failure are not torn down here; the caller releases them via
 * vunmap() of the enclosing vm area.
 */
static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
	phys_addr_t size, unsigned long flags)
{
	int error;
	pgd_t * dir;
	unsigned long end = address + size;

	/* Bias so that (phys_addr + address) below yields the physical address */
	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	BUG_ON(address >= end);
	do {
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;
		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		/* Advance to the start of the next PGD-sized region */
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	/* Drop any stale translations before the new mappings are used */
	flush_tlb_all();
	return error;
}
|
|
|
|
/*
 * walk_system_ram_range() callback used by __ioremap().
 *
 * Scans the given pfn range and reports whether it contains any page the
 * page allocator might own: returns 1 as soon as a valid, non-reserved
 * page is found, 0 if the whole range is free of such pages. 'arg' is
 * unused.
 */
static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
	void *arg)
{
	unsigned long pfn, end_pfn = start_pfn + nr_pages;

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		if (!PageReserved(pfn_to_page(pfn)))
			return 1;
	}

	return 0;
}
|
|
|
|
/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */

/* True iff 'addr' fits in the low 512MB that KSEG1 can address directly */
#define IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))

/*
 * Map physical range [phys_addr, phys_addr + size) for MMIO access with
 * the cacheability given by 'flags'. Uncached mappings of the low 512MB
 * are satisfied from the unmapped KSEG1 segment; everything else gets a
 * vmalloc-space mapping via remap_area_pages(). Returns NULL on zero
 * size, wraparound, an attempt to map allocatable RAM, or allocation
 * failure. The returned pointer preserves any sub-page offset of
 * phys_addr.
 */
void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
{
	unsigned long offset, pfn, last_pfn;
	struct vm_struct * area;
	phys_addr_t last_addr;
	void * addr;

	/* Platform hook: may rewrite the address for large-physaddr systems */
	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map uncached objects in the low 512mb of address space using KSEG1,
	 * otherwise map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void __iomem *) CKSEG1ADDR(phys_addr);

	/*
	 * Don't allow anybody to remap RAM that may be allocated by the page
	 * allocator, since that could lead to races & data clobbering.
	 */
	pfn = PFN_DOWN(phys_addr);
	last_pfn = PFN_DOWN(last_addr);
	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
				  __ioremap_check_ram) == 1) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		/* Releases the vm area and any partially created mappings */
		vunmap(addr);
		return NULL;
	}

	/* Re-apply the sub-page offset stripped during alignment above */
	return (void __iomem *) (offset + (char *)addr);
}
|
|
|
|
#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)
|
|
|
|
void __iounmap(const volatile void __iomem *addr)
|
|
{
|
|
struct vm_struct *p;
|
|
|
|
if (IS_KSEG1(addr))
|
|
return;
|
|
|
|
p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
|
|
if (!p)
|
|
printk(KERN_ERR "iounmap: bad address %p\n", addr);
|
|
|
|
kfree(p);
|
|
}
|
|
|
|
/* Exported so modular drivers can establish and tear down MMIO mappings */
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__iounmap);
|