mm: Introduce vmap_page_range() to map pages in PCI address space
ioremap_page_range() should be used only for ranges within the vmalloc
range. The vmalloc ranges are allocated by get_vm_area(). PCI has its
own "resource" allocator that manages the PCI_IOBASE..IO_SPACE_LIMIT
address range, hence introduce vmap_page_range() to be used exclusively
to map pages in the PCI address space.
Fixes: 3e49a866c9 ("mm: Enforce VM_IOREMAP flag and range in ioremap_page_range.")
Reported-by: Miguel Ojeda <ojeda@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: Miguel Ojeda <ojeda@kernel.org>
Link: https://lore.kernel.org/bpf/CANiq72ka4rir+RTN2FQoT=Vvprp_Ao-CvoYEkSNqtSY+RZj+AA@mail.gmail.com
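
For context, a minimal sketch of the split the commit message describes: the fixed PCI I/O window at PCI_IOBASE is handed out by the PCI "resource" allocator, not by get_vm_area(), so it must be mapped with the new vmap_page_range() rather than ioremap_page_range(). The helper name and bounds check below are illustrative, not part of the patch:

#include <linux/io.h>	/* PCI_IOBASE, IO_SPACE_LIMIT, vmap_page_range() */

/* Hypothetical helper: map one page of the fixed PCI I/O window. */
static int example_map_pio_page(phys_addr_t cpu_phys, resource_size_t pio)
{
	unsigned long vaddr = (unsigned long)(PCI_IOBASE + pio);

	if (pio + PAGE_SIZE > IO_SPACE_LIMIT)
		return -EINVAL;

	/*
	 * This virtual range is not a vmalloc area, so the VM_IOREMAP
	 * check in ioremap_page_range() would (rightly) reject it;
	 * use vmap_page_range() instead.
	 */
	return vmap_page_range(vaddr, vaddr + PAGE_SIZE, cpu_phys,
			       pgprot_device(PAGE_KERNEL));
}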
commit d7bca9199a (parent 96b0f5addc)
@@ -110,8 +110,8 @@ void __init add_static_vm_early(struct static_vm *svm)
 int ioremap_page(unsigned long virt, unsigned long phys,
 		 const struct mem_type *mtype)
 {
-	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
-				  __pgprot(mtype->prot_pte));
+	return vmap_page_range(virt, virt + PAGE_SIZE, phys,
+			       __pgprot(mtype->prot_pte));
 }
 EXPORT_SYMBOL(ioremap_page);
@@ -466,8 +466,8 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
 	if (res->end > IO_SPACE_LIMIT)
 		return -EINVAL;
 
-	return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
-				  __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
+	return vmap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
+			       __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
 }
 EXPORT_SYMBOL(pci_remap_iospace);
@@ -490,7 +490,7 @@ static int __init add_legacy_isa_io(struct fwnode_handle *fwnode,
 	}
 
 	vaddr = (unsigned long)(PCI_IOBASE + range->io_start);
-	ioremap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
+	vmap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
 
 	return 0;
 }
@@ -180,7 +180,7 @@ static int __init add_legacy_isa_io(struct fwnode_handle *fwnode, resource_size_
 
 	vaddr = PCI_IOBASE + range->io_start;
 
-	ioremap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
+	vmap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
 
 	return 0;
 }
@@ -46,8 +46,8 @@ static void remap_isa_base(phys_addr_t pa, unsigned long size)
 	WARN_ON_ONCE(size & ~PAGE_MASK);
 
 	if (slab_is_available()) {
-		if (ioremap_page_range(ISA_IO_BASE, ISA_IO_BASE + size, pa,
-				pgprot_noncached(PAGE_KERNEL)))
+		if (vmap_page_range(ISA_IO_BASE, ISA_IO_BASE + size, pa,
+				pgprot_noncached(PAGE_KERNEL)))
 			vunmap_range(ISA_IO_BASE, ISA_IO_BASE + size);
 	} else {
 		early_ioremap_range(ISA_IO_BASE, pa, size,
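
The powerpc hunk above also shows the error-handling contract: a failed vmap_page_range() may leave a partially populated range behind, which is why the caller tears the whole window back down with vunmap_range(). A condensed sketch of that map-or-unwind pattern (the helper name is hypothetical):

static int example_map_or_unwind(unsigned long va, unsigned long end,
				 phys_addr_t pa, pgprot_t prot)
{
	int err = vmap_page_range(va, end, pa, prot);

	/* On failure, earlier PTEs may already be live; unmap them all. */
	if (err)
		vunmap_range(va, end);
	return err;
}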
@@ -4353,8 +4353,8 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
 	if (res->end > IO_SPACE_LIMIT)
 		return -EINVAL;
 
-	return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
-				  pgprot_device(PAGE_KERNEL));
+	return vmap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
+			       pgprot_device(PAGE_KERNEL));
 #else
 	/*
 	 * This architecture does not have memory mapped I/O space,
@@ -23,12 +23,19 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
 #ifdef CONFIG_MMU
 int ioremap_page_range(unsigned long addr, unsigned long end,
 		       phys_addr_t phys_addr, pgprot_t prot);
+int vmap_page_range(unsigned long addr, unsigned long end,
+		    phys_addr_t phys_addr, pgprot_t prot);
 #else
 static inline int ioremap_page_range(unsigned long addr, unsigned long end,
 				     phys_addr_t phys_addr, pgprot_t prot)
 {
 	return 0;
 }
+static inline int vmap_page_range(unsigned long addr, unsigned long end,
+				  phys_addr_t phys_addr, pgprot_t prot)
+{
+	return 0;
+}
 #endif
 
 /*
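
Note that the !CONFIG_MMU stub, like the existing ioremap_page_range() stub, returns 0 as a successful no-op, so callers can stay unconditional with no #ifdef of their own. A minimal sketch (the helper name is hypothetical):

#include <linux/io.h>

static int example_map_window(unsigned long va, phys_addr_t pa, size_t len)
{
	/* Compiles and "succeeds" on !CONFIG_MMU thanks to the stub. */
	return vmap_page_range(va, va + len, pa, pgprot_device(PAGE_KERNEL));
}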
mm/vmalloc.c
@@ -304,11 +304,24 @@ static int vmap_range_noflush(unsigned long addr, unsigned long end,
 	return err;
 }
 
+int vmap_page_range(unsigned long addr, unsigned long end,
+		    phys_addr_t phys_addr, pgprot_t prot)
+{
+	int err;
+
+	err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
+				 ioremap_max_page_shift);
+	flush_cache_vmap(addr, end);
+	if (!err)
+		err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
+					       ioremap_max_page_shift);
+	return err;
+}
+
 int ioremap_page_range(unsigned long addr, unsigned long end,
 		       phys_addr_t phys_addr, pgprot_t prot)
 {
 	struct vm_struct *area;
 	int err;
 
 	area = find_vm_area((void *)addr);
 	if (!area || !(area->flags & VM_IOREMAP)) {
@@ -322,13 +335,7 @@ int ioremap_page_range(unsigned long addr, unsigned long end,
 			(long)area->addr + get_vm_area_size(area));
 		return -ERANGE;
 	}
-	err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
-				 ioremap_max_page_shift);
-	flush_cache_vmap(addr, end);
-	if (!err)
-		err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
-					       ioremap_max_page_shift);
-	return err;
+	return vmap_page_range(addr, end, phys_addr, prot);
 }
 
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
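
After this refactor, ioremap_page_range() is a pure "validate, then delegate" wrapper: it insists the target lies inside a VM_IOREMAP vmalloc area (via find_vm_area()) and then calls vmap_page_range() for the actual mapping. A condensed sketch of a conforming caller, in the spirit of a generic ioremap() implementation (the helper name and error handling are illustrative, not from the patch):

#include <linux/io.h>
#include <linux/vmalloc.h>

static void __iomem *example_ioremap(phys_addr_t phys, size_t size)
{
	struct vm_struct *area;
	unsigned long vaddr;

	/* Reserve a vmalloc-range area tagged VM_IOREMAP... */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;

	/*
	 * ...which is exactly what ioremap_page_range() now verifies
	 * before delegating to vmap_page_range().
	 */
	if (ioremap_page_range(vaddr, vaddr + size, phys,
			       pgprot_noncached(PAGE_KERNEL))) {
		free_vm_area(area);
		return NULL;
	}
	return (void __iomem *)vaddr;
}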