generic ioremap support
- clean up various obsolete ioremap and iounmap variants
- add a new generic ioremap implementation and switch csky, nds32 and riscv over to it

Merge tag 'ioremap-5.5' of git://git.infradead.org/users/hch/ioremap

Pull generic ioremap support from Christoph Hellwig:
 "This adds the remaining bits for an entirely generic ioremap and
  iounmap to lib/ioremap.c. To facilitate that, it cleans up the giant
  mess of weird ioremap variants we had with no users outside the arch
  code.

  For now just the three newest ports use the code, but there is more
  than a handful others that can be converted without too much work.

  Summary:

   - clean up various obsolete ioremap and iounmap variants

   - add a new generic ioremap implementation and switch csky, nds32
     and riscv over to it"

* tag 'ioremap-5.5' of git://git.infradead.org/users/hch/ioremap: (21 commits)
  nds32: use generic ioremap
  csky: use generic ioremap
  csky: remove ioremap_cache
  riscv: use the generic ioremap code
  lib: provide a simple generic ioremap implementation
  sh: remove __iounmap
  nios2: remove __iounmap
  hexagon: remove __iounmap
  m68k: rename __iounmap and mark it static
  arch: rely on asm-generic/io.h for default ioremap_* definitions
  asm-generic: don't provide ioremap for CONFIG_MMU
  asm-generic: ioremap_uc should behave the same with and without MMU
  xtensa: clean up ioremap
  x86: Clean up ioremap()
  parisc: remove __ioremap
  nios2: remove __ioremap
  alpha: remove the unused __ioremap wrapper
  hexagon: clean up ioremap
  ia64: rename ioremap_nocache to ioremap_uc
  unicore32: remove ioremap_cached
  ...
commit a308a71022
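The opt-in pattern for the new generic code is deliberately small: an architecture selects GENERIC_IOREMAP, supplies a _PAGE_IOREMAP pgprot value, and lets asm-generic/io.h route ioremap()/iounmap() to the shared implementation in lib/ioremap.c. A minimal sketch of such a port follows; the "myarch" name and the page-flag spellings are illustrative assumptions, not taken from this merge:

	# arch/myarch/Kconfig (hypothetical)
	config MYARCH
		select GENERIC_IOREMAP

	/* arch/myarch/include/asm/pgtable.h (hypothetical) */
	/* pgprot bits the generic ioremap_prot() applies to MMIO mappings */
	#define _PAGE_IOREMAP	(_PAGE_KERNEL | _PAGE_UNCACHED)

	/* arch/myarch/include/asm/io.h (hypothetical) */
	/* picks up ioremap()/iounmap() built on top of ioremap_prot() */
	#include <asm-generic/io.h>

This mirrors what the csky, nds32 and riscv hunks in the diff below do: add the Kconfig select, define _PAGE_IOREMAP in pgtable.h, include asm-generic/io.h, and delete the per-arch mm/ioremap.c.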
@@ -283,12 +283,6 @@ static inline void __iomem *ioremap(unsigned long port, unsigned long size)
 	return IO_CONCAT(__IO_PREFIX,ioremap) (port, size);
 }

-static inline void __iomem *__ioremap(unsigned long port, unsigned long size,
-				       unsigned long flags)
-{
-	return ioremap(port, size);
-}
-
 static inline void __iomem * ioremap_nocache(unsigned long offset,
 					     unsigned long size)
 {
@@ -34,10 +34,6 @@ static inline void ioport_unmap(void __iomem *addr)

 extern void iounmap(const void __iomem *addr);

-#define ioremap_nocache(phy, sz)	ioremap(phy, sz)
-#define ioremap_wc(phy, sz)		ioremap(phy, sz)
-#define ioremap_wt(phy, sz)		ioremap(phy, sz)
-
 /*
  * io{read,write}{16,32}be() macros
  */
@@ -392,7 +392,6 @@ static inline void memcpy_toio(volatile void __iomem *to, const void *from,
  */
 void __iomem *ioremap(resource_size_t res_cookie, size_t size);
 #define ioremap ioremap
-#define ioremap_cached ioremap_cached
 #define ioremap_nocache ioremap

 /*
  * Do not use ioremap_cache for mapping memory. Use memremap instead.
@@ -400,12 +399,6 @@ void __iomem *ioremap(resource_size_t res_cookie, size_t size);
 void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size);
 #define ioremap_cache ioremap_cache

-/*
- * Do not use ioremap_cached in new code. Provided for the benefit of
- * the pxa2xx-flash MTD driver only.
- */
-void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size);
-
 void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
 #define ioremap_wc ioremap_wc
 #define ioremap_wt ioremap_wc
@@ -382,15 +382,11 @@ void __iomem *ioremap(resource_size_t res_cookie, size_t size)
 EXPORT_SYMBOL(ioremap);

 void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
-	__alias(ioremap_cached);
-
-void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size)
 {
 	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
 				   __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_cache);
-EXPORT_SYMBOL(ioremap_cached);

 void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
 {
@@ -259,7 +259,7 @@ static struct mem_type mem_types[] __ro_after_init = {
 		.prot_sect	= PROT_SECT_DEVICE,
 		.domain		= DOMAIN_IO,
 	},
-	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
+	[MT_DEVICE_CACHED] = {	  /* ioremap_cache */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
@@ -206,15 +206,11 @@ void __iomem *ioremap(resource_size_t res_cookie, size_t size)
 EXPORT_SYMBOL(ioremap);

 void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
-	__alias(ioremap_cached);
-
-void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size)
 {
 	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
 				    __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_cache);
-EXPORT_SYMBOL(ioremap_cached);

 void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
 {
@@ -167,9 +167,7 @@ extern void iounmap(volatile void __iomem *addr);
 extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);

 #define ioremap(addr, size)		__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
-#define ioremap_nocache(addr, size)	__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_wc(addr, size)		__ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
-#define ioremap_wt(addr, size)		__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))

 /*
  * PCI configuration space mapping function.
@@ -17,6 +17,7 @@ config CSKY
 	select IRQ_DOMAIN
 	select HANDLE_DOMAIN_IRQ
 	select DW_APB_TIMER_OF
+	select GENERIC_IOREMAP
 	select GENERIC_LIB_ASHLDI3
 	select GENERIC_LIB_ASHRDI3
 	select GENERIC_LIB_LSHRDI3
@@ -36,14 +36,9 @@
 /*
  * I/O memory mapping functions.
  */
-extern void __iomem *ioremap_cache(phys_addr_t addr, size_t size);
-extern void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot);
-extern void iounmap(void *addr);
-
-#define ioremap(addr, size) __ioremap((addr), (size), pgprot_noncached(PAGE_KERNEL))
-#define ioremap_wc(addr, size) __ioremap((addr), (size), pgprot_writecombine(PAGE_KERNEL))
-#define ioremap_nocache(addr, size) ioremap((addr), (size))
-#define ioremap_cache ioremap_cache
+#define ioremap_wc(addr, size) \
+	ioremap_prot((addr), (size), \
+		(_PAGE_IOREMAP & ~_CACHE_MASK) | _CACHE_UNCACHED)

 #include <asm-generic/io.h>

@@ -86,6 +86,10 @@
 #define PAGE_USERIO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
 				_CACHE_CACHED)

+#define _PAGE_IOREMAP \
+	(_PAGE_PRESENT | __READABLE | __WRITEABLE | _PAGE_GLOBAL | \
+	 _CACHE_UNCACHED | _PAGE_SO)
+
 #define __P000	PAGE_NONE
 #define __P001	PAGE_READONLY
 #define __P010	PAGE_COPY
@@ -3,60 +3,8 @@

 #include <linux/export.h>
 #include <linux/mm.h>
-#include <linux/vmalloc.h>
 #include <linux/io.h>

-#include <asm/pgtable.h>
-
-static void __iomem *__ioremap_caller(phys_addr_t addr, size_t size,
-				      pgprot_t prot, void *caller)
-{
-	phys_addr_t last_addr;
-	unsigned long offset, vaddr;
-	struct vm_struct *area;
-
-	last_addr = addr + size - 1;
-	if (!size || last_addr < addr)
-		return NULL;
-
-	offset = addr & (~PAGE_MASK);
-	addr &= PAGE_MASK;
-	size = PAGE_ALIGN(size + offset);
-
-	area = get_vm_area_caller(size, VM_IOREMAP, caller);
-	if (!area)
-		return NULL;
-
-	vaddr = (unsigned long)area->addr;
-
-	if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
-		free_vm_area(area);
-		return NULL;
-	}
-
-	return (void __iomem *)(vaddr + offset);
-}
-
-void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
-{
-	return __ioremap_caller(phys_addr, size, prot,
-				__builtin_return_address(0));
-}
-EXPORT_SYMBOL(__ioremap);
-
-void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
-{
-	return __ioremap_caller(phys_addr, size, PAGE_KERNEL,
-				__builtin_return_address(0));
-}
-EXPORT_SYMBOL(ioremap_cache);
-
-void iounmap(void __iomem *addr)
-{
-	vunmap((void *)((unsigned long)addr & PAGE_MASK));
-}
-EXPORT_SYMBOL(iounmap);
-
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 			      unsigned long size, pgprot_t vma_prot)
 {
@@ -27,7 +27,7 @@
 extern int remap_area_pages(unsigned long start, unsigned long phys_addr,
 			    unsigned long end, unsigned long flags);

-extern void __iounmap(const volatile void __iomem *addr);
+extern void iounmap(const volatile void __iomem *addr);

 /* Defined in lib/io.c, needed for smc91x driver. */
 extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
@@ -171,21 +171,9 @@ static inline void writel(u32 data, volatile void __iomem *addr)
 #define writew_relaxed __raw_writew
 #define writel_relaxed __raw_writel

 /*
  * Need an mtype somewhere in here, for cache type deals?
  * This is probably too long for an inline.
  */
-void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size);
+void __iomem *ioremap(unsigned long phys_addr, unsigned long size);
+#define ioremap_nocache ioremap
-
-static inline void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
-{
-	return ioremap_nocache(phys_addr, size);
-}
-
-static inline void iounmap(volatile void __iomem *addr)
-{
-	__iounmap(addr);
-}
-
-#define __raw_writel writel
-
@@ -14,13 +14,13 @@
 EXPORT_SYMBOL(__clear_user_hexagon);
 EXPORT_SYMBOL(raw_copy_from_user);
 EXPORT_SYMBOL(raw_copy_to_user);
-EXPORT_SYMBOL(__iounmap);
+EXPORT_SYMBOL(iounmap);
 EXPORT_SYMBOL(__strnlen_user);
 EXPORT_SYMBOL(__vmgetie);
 EXPORT_SYMBOL(__vmsetie);
 EXPORT_SYMBOL(__vmyield);
 EXPORT_SYMBOL(empty_zero_page);
-EXPORT_SYMBOL(ioremap_nocache);
+EXPORT_SYMBOL(ioremap);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memset);

@@ -9,7 +9,7 @@
 #include <linux/vmalloc.h>
 #include <linux/mm.h>

-void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
 {
 	unsigned long last_addr, addr;
 	unsigned long offset = phys_addr & ~PAGE_MASK;
@@ -38,7 +38,7 @@ void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
 	return (void __iomem *) (offset + addr);
 }

-void __iounmap(const volatile void __iomem *addr)
+void iounmap(const volatile void __iomem *addr)
 {
 	vunmap((void *) ((unsigned long) addr & PAGE_MASK));
 }
@@ -256,16 +256,15 @@ static inline void outsl(unsigned long port, const void *src,
 # ifdef __KERNEL__

 extern void __iomem * ioremap(unsigned long offset, unsigned long size);
-extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
+extern void __iomem * ioremap_uc(unsigned long offset, unsigned long size);
 extern void iounmap (volatile void __iomem *addr);
 static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned long size)
 {
 	return ioremap(phys_addr, size);
 }
 #define ioremap ioremap
-#define ioremap_nocache ioremap_nocache
 #define ioremap_cache ioremap_cache
-#define ioremap_uc ioremap_nocache
+#define ioremap_uc ioremap_uc
 #define iounmap iounmap

 /*
@@ -99,14 +99,14 @@ ioremap (unsigned long phys_addr, unsigned long size)
 EXPORT_SYMBOL(ioremap);

 void __iomem *
-ioremap_nocache (unsigned long phys_addr, unsigned long size)
+ioremap_uc(unsigned long phys_addr, unsigned long size)
 {
 	if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
 		return NULL;

 	return __ioremap_uc(phys_addr);
 }
-EXPORT_SYMBOL(ioremap_nocache);
+EXPORT_SYMBOL(ioremap_uc);

 void
 early_iounmap (volatile void __iomem *addr, unsigned long size)
@@ -20,7 +20,6 @@ extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
 			       int cacheflag);
 #define iounmap iounmap
 extern void iounmap(void __iomem *addr);
-extern void __iounmap(void *addr, unsigned long size);

 #define ioremap ioremap
 static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
@@ -54,6 +54,55 @@ static inline void free_io_area(void *addr)

 static struct vm_struct *iolist;

+/*
+ * __free_io_area unmaps nearly everything, so be careful
+ * Currently it doesn't free pointer/page tables anymore but this
+ * wasn't used anyway and might be added later.
+ */
+static void __free_io_area(void *addr, unsigned long size)
+{
+	unsigned long virtaddr = (unsigned long)addr;
+	pgd_t *pgd_dir;
+	pmd_t *pmd_dir;
+	pte_t *pte_dir;
+
+	while ((long)size > 0) {
+		pgd_dir = pgd_offset_k(virtaddr);
+		if (pgd_bad(*pgd_dir)) {
+			printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
+			pgd_clear(pgd_dir);
+			return;
+		}
+		pmd_dir = pmd_offset(pgd_dir, virtaddr);
+
+		if (CPU_IS_020_OR_030) {
+			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
+			int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;
+
+			if (pmd_type == _PAGE_PRESENT) {
+				pmd_dir->pmd[pmd_off] = 0;
+				virtaddr += PTRTREESIZE;
+				size -= PTRTREESIZE;
+				continue;
+			} else if (pmd_type == 0)
+				continue;
+		}
+
+		if (pmd_bad(*pmd_dir)) {
+			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
+			pmd_clear(pmd_dir);
+			return;
+		}
+		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
+
+		pte_val(*pte_dir) = 0;
+		virtaddr += PAGE_SIZE;
+		size -= PAGE_SIZE;
+	}
+
+	flush_tlb_all();
+}
+
 static struct vm_struct *get_io_area(unsigned long size)
 {
 	unsigned long addr;
@@ -90,7 +139,7 @@ static inline void free_io_area(void *addr)
 		if (tmp->addr == addr) {
 			*p = tmp->next;
 			/* remove gap added in get_io_area() */
-			__iounmap(tmp->addr, tmp->size - IO_SIZE);
+			__free_io_area(tmp->addr, tmp->size - IO_SIZE);
 			kfree(tmp);
 			return;
 		}
@@ -249,55 +298,6 @@ void iounmap(void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);

-/*
- * __iounmap unmaps nearly everything, so be careful
- * Currently it doesn't free pointer/page tables anymore but this
- * wasn't used anyway and might be added later.
- */
-void __iounmap(void *addr, unsigned long size)
-{
-	unsigned long virtaddr = (unsigned long)addr;
-	pgd_t *pgd_dir;
-	pmd_t *pmd_dir;
-	pte_t *pte_dir;
-
-	while ((long)size > 0) {
-		pgd_dir = pgd_offset_k(virtaddr);
-		if (pgd_bad(*pgd_dir)) {
-			printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
-			pgd_clear(pgd_dir);
-			return;
-		}
-		pmd_dir = pmd_offset(pgd_dir, virtaddr);
-
-		if (CPU_IS_020_OR_030) {
-			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
-			int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;
-
-			if (pmd_type == _PAGE_PRESENT) {
-				pmd_dir->pmd[pmd_off] = 0;
-				virtaddr += PTRTREESIZE;
-				size -= PTRTREESIZE;
-				continue;
-			} else if (pmd_type == 0)
-				continue;
-		}
-
-		if (pmd_bad(*pmd_dir)) {
-			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
-			pmd_clear(pmd_dir);
-			return;
-		}
-		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
-
-		pte_val(*pte_dir) = 0;
-		virtaddr += PAGE_SIZE;
-		size -= PAGE_SIZE;
-	}
-
-	flush_tlb_all();
-}
-
 /*
  * Set new cache mode for some kernel address space.
  * The caller must push data for that range itself, if such data may already
@@ -39,9 +39,6 @@ extern resource_size_t isa_mem_base;
 extern void iounmap(volatile void __iomem *addr);

 extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
-#define ioremap_nocache(addr, size)	ioremap((addr), (size))
-#define ioremap_wc(addr, size)		ioremap((addr), (size))
-#define ioremap_wt(addr, size)		ioremap((addr), (size))

 #endif /* CONFIG_MMU */

@@ -20,6 +20,7 @@ config NDS32
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_IRQ_CHIP
 	select GENERIC_IRQ_SHOW
+	select GENERIC_IOREMAP
 	select GENERIC_LIB_ASHLDI3
 	select GENERIC_LIB_ASHRDI3
 	select GENERIC_LIB_CMPDI2
@@ -6,7 +6,6 @@

 #include <linux/types.h>

-extern void iounmap(volatile void __iomem *addr);
 #define __raw_writeb __raw_writeb
 static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
 {
@@ -79,5 +78,7 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
 #define writeb(v,c)	({ __iowmb(); writeb_relaxed((v),(c)); })
 #define writew(v,c)	({ __iowmb(); writew_relaxed((v),(c)); })
 #define writel(v,c)	({ __iowmb(); writel_relaxed((v),(c)); })
+
+#include <asm-generic/io.h>

 #endif /* __ASM_NDS32_IO_H */
@@ -12,7 +12,6 @@
 #include <asm/nds32.h>
 #ifndef __ASSEMBLY__
 #include <asm/fixmap.h>
-#include <asm/io.h>
 #include <nds32_intrinsic.h>
 #endif

@@ -130,6 +129,9 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #define _PAGE_CACHE	_PAGE_C_MEM_WB
 #endif

+#define _PAGE_IOREMAP \
+	(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_G | _PAGE_C_DEV)
+
 /*
  * + Level 1 descriptor (PMD)
  */
@@ -1,6 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0-only
-obj-y	:= extable.o tlb.o \
-	   fault.o init.o ioremap.o mmap.o \
+obj-y	:= extable.o tlb.o fault.o init.o mmap.o \
 	   mm-nds32.o cacheflush.o proc.o

 obj-$(CONFIG_ALIGNMENT_TRAP)	+= alignment.o
@@ -1,62 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2005-2017 Andes Technology Corporation
-
-#include <linux/vmalloc.h>
-#include <linux/io.h>
-#include <linux/mm.h>
-#include <asm/pgtable.h>
-
-void __iomem *ioremap(phys_addr_t phys_addr, size_t size);
-
-static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
-				      void *caller)
-{
-	struct vm_struct *area;
-	unsigned long addr, offset, last_addr;
-	pgprot_t prot;
-
-	/* Don't allow wraparound or zero size */
-	last_addr = phys_addr + size - 1;
-	if (!size || last_addr < phys_addr)
-		return NULL;
-
-	/*
-	 * Mappings have to be page-aligned
-	 */
-	offset = phys_addr & ~PAGE_MASK;
-	phys_addr &= PAGE_MASK;
-	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
-
-	/*
-	 * Ok, go for it..
-	 */
-	area = get_vm_area_caller(size, VM_IOREMAP, caller);
-	if (!area)
-		return NULL;
-
-	area->phys_addr = phys_addr;
-	addr = (unsigned long)area->addr;
-	prot = __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D |
-			_PAGE_G | _PAGE_C_DEV);
-	if (ioremap_page_range(addr, addr + size, phys_addr, prot)) {
-		vunmap((void *)addr);
-		return NULL;
-	}
-	return (__force void __iomem *)(offset + (char *)addr);
-
-}
-
-void __iomem *ioremap(phys_addr_t phys_addr, size_t size)
-{
-	return __ioremap_caller(phys_addr, size,
-				__builtin_return_address(0));
-}
-
-EXPORT_SYMBOL(ioremap);
-
-void iounmap(volatile void __iomem * addr)
-{
-	vunmap((void *)(PAGE_MASK & (unsigned long)addr));
-}
-
-EXPORT_SYMBOL(iounmap);
@@ -25,29 +25,8 @@
 #define writew_relaxed(x, addr) writew(x, addr)
 #define writel_relaxed(x, addr) writel(x, addr)

-extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
-			       unsigned long cacheflag);
-extern void __iounmap(void __iomem *addr);
-
-static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
-{
-	return __ioremap(physaddr, size, 0);
-}
-
-static inline void __iomem *ioremap_nocache(unsigned long physaddr,
-					    unsigned long size)
-{
-	return __ioremap(physaddr, size, 0);
-}
-
-static inline void iounmap(void __iomem *addr)
-{
-	__iounmap(addr);
-}
-
-#define ioremap_nocache ioremap_nocache
-#define ioremap_wc ioremap_nocache
-#define ioremap_wt ioremap_nocache
+void __iomem *ioremap(unsigned long physaddr, unsigned long size);
+void iounmap(void __iomem *addr);

 /* Pages to physical address... */
 #define page_to_phys(page)	virt_to_phys(page_to_virt(page))
@@ -112,8 +112,7 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
 /*
  * Map some physical address range into the kernel address space.
  */
-void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
-			unsigned long cacheflag)
+void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
 {
 	struct vm_struct *area;
 	unsigned long offset;
@@ -139,15 +138,6 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 		return NULL;
 	}

-	/*
-	 * Map uncached objects in the low part of address space to
-	 * CONFIG_NIOS2_IO_REGION_BASE
-	 */
-	if (IS_MAPPABLE_UNCACHEABLE(phys_addr) &&
-	    IS_MAPPABLE_UNCACHEABLE(last_addr) &&
-	    !(cacheflag & _PAGE_CACHED))
-		return (void __iomem *)(CONFIG_NIOS2_IO_REGION_BASE + phys_addr);
-
 	/* Mappings have to be page-aligned */
 	offset = phys_addr & ~PAGE_MASK;
 	phys_addr &= PAGE_MASK;
@@ -158,21 +148,20 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 	if (!area)
 		return NULL;
 	addr = area->addr;
-	if (remap_area_pages((unsigned long) addr, phys_addr, size,
-			     cacheflag)) {
+	if (remap_area_pages((unsigned long) addr, phys_addr, size, 0)) {
 		vunmap(addr);
 		return NULL;
 	}
 	return (void __iomem *) (offset + (char *)addr);
 }
-EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(ioremap);

 /*
- * __iounmap unmaps nearly everything, so be careful
+ * iounmap unmaps nearly everything, so be careful
 * it doesn't free currently pointer/page tables anymore but it
 * wasn't used anyway and might be added later.
 */
-void __iounmap(void __iomem *addr)
+void iounmap(void __iomem *addr)
 {
 	struct vm_struct *p;

@@ -184,4 +173,4 @@ void __iounmap(void __iomem *addr)
 		pr_err("iounmap: bad address %p\n", addr);
 	kfree(p);
 }
-EXPORT_SYMBOL(__iounmap);
+EXPORT_SYMBOL(iounmap);
@@ -25,7 +25,6 @@
 #define PIO_OFFSET	0
 #define PIO_MASK	0

-#define ioremap_nocache ioremap
 #include <asm-generic/io.h>
 #include <asm/pgtable.h>

@@ -127,16 +127,7 @@ static inline void gsc_writeq(unsigned long long val, unsigned long addr)
 /*
  * The standard PCI ioremap interfaces
  */
-
-extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
-
-/* Most machines react poorly to I/O-space being cacheable... Instead let's
- * define ioremap() in terms of ioremap_nocache().
- */
-static inline void __iomem * ioremap(unsigned long offset, unsigned long size)
-{
-	return __ioremap(offset, size, _PAGE_NO_CACHE);
-}
+void __iomem *ioremap(unsigned long offset, unsigned long size);
 #define ioremap_nocache(off, sz)	ioremap((off), (sz))
 #define ioremap_wc			ioremap_nocache
 #define ioremap_uc			ioremap_nocache
@@ -25,7 +25,7 @@
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
-void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
 {
 	void __iomem *addr;
 	struct vm_struct *area;
@@ -36,10 +36,8 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 	unsigned long end = phys_addr + size - 1;
 	/* Support EISA addresses */
 	if ((phys_addr >= 0x00080000 && end < 0x000fffff) ||
-	    (phys_addr >= 0x00500000 && end < 0x03bfffff)) {
+	    (phys_addr >= 0x00500000 && end < 0x03bfffff))
 		phys_addr |= F_EXTEND(0xfc000000);
-		flags |= _PAGE_NO_CACHE;
-	}
 #endif

 	/* Don't allow wraparound or zero size */
@@ -65,7 +63,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 	}

 	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
-			  _PAGE_ACCESSED | flags);
+			  _PAGE_ACCESSED | _PAGE_NO_CACHE);

 	/*
 	 * Mappings have to be page-aligned
@@ -90,7 +88,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l

 	return (void __iomem *) (offset + (char __iomem *)addr);
 }
-EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(ioremap);

 void iounmap(const volatile void __iomem *io_addr)
 {
@@ -30,6 +30,7 @@ config RISCV
 	select GENERIC_STRNLEN_USER if MMU
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_ATOMIC64 if !64BIT
+	select GENERIC_IOREMAP
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ASM_MODVERSIONS
@@ -14,20 +14,7 @@
 #include <linux/types.h>
 #include <asm/mmiowb.h>

-#ifdef CONFIG_MMU
-void __iomem *ioremap(phys_addr_t offset, unsigned long size);
-
-/*
- * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
- * change the properties of memory regions. This should be fixed by the
- * upcoming platform spec.
- */
-#define ioremap_nocache(addr, size) ioremap((addr), (size))
-#define ioremap_wc(addr, size) ioremap((addr), (size))
-#define ioremap_wt(addr, size) ioremap((addr), (size))
-
-void iounmap(volatile void __iomem *addr);
-#else
+#ifndef CONFIG_MMU
 #define pgprot_noncached(x)	(x)
 #endif /* CONFIG_MMU */

@@ -62,6 +62,12 @@

 #define PAGE_TABLE	__pgprot(_PAGE_TABLE)

+/*
+ * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
+ * change the properties of memory regions.
+ */
+#define _PAGE_IOREMAP _PAGE_KERNEL
+
 extern pgd_t swapper_pg_dir[];

 /* MAP_PRIVATE permissions: xwr (copy-on-write) */
@@ -7,7 +7,7 @@ endif

 obj-y += init.o
 obj-y += extable.o
-obj-$(CONFIG_MMU) += fault.o ioremap.o
+obj-$(CONFIG_MMU) += fault.o
 obj-y += cacheflush.o
 obj-y += context.o
 obj-y += sifive_l2_cache.o
@@ -1,84 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * (C) Copyright 1995 1996 Linus Torvalds
- * (C) Copyright 2012 Regents of the University of California
- */
-
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/io.h>
-
-#include <asm/pgtable.h>
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- */
-static void __iomem *__ioremap_caller(phys_addr_t addr, size_t size,
-				      pgprot_t prot, void *caller)
-{
-	phys_addr_t last_addr;
-	unsigned long offset, vaddr;
-	struct vm_struct *area;
-
-	/* Disallow wrap-around or zero size */
-	last_addr = addr + size - 1;
-	if (!size || last_addr < addr)
-		return NULL;
-
-	/* Page-align mappings */
-	offset = addr & (~PAGE_MASK);
-	addr -= offset;
-	size = PAGE_ALIGN(size + offset);
-
-	area = get_vm_area_caller(size, VM_IOREMAP, caller);
-	if (!area)
-		return NULL;
-	vaddr = (unsigned long)area->addr;
-
-	if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
-		free_vm_area(area);
-		return NULL;
-	}
-
-	return (void __iomem *)(vaddr + offset);
-}
-
-/*
- * ioremap - map bus memory into CPU space
- * @offset: bus address of the memory
- * @size: size of the resource to map
- *
- * ioremap performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address.
- *
- * Must be freed with iounmap.
- */
-void __iomem *ioremap(phys_addr_t offset, unsigned long size)
-{
-	return __ioremap_caller(offset, size, PAGE_KERNEL,
-				__builtin_return_address(0));
-}
-EXPORT_SYMBOL(ioremap);
-
-
-/**
- * iounmap - Free a IO remapping
- * @addr: virtual address from ioremap_*
- *
- * Caller must ensure there is only one unmapping for the same pointer.
- */
-void iounmap(volatile void __iomem *addr)
-{
-	vunmap((void *)((unsigned long)addr & PAGE_MASK));
-}
-EXPORT_SYMBOL(iounmap);
@@ -26,10 +26,6 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);

 #define IO_SPACE_LIMIT 0

-#define ioremap_nocache(addr, size)	ioremap(addr, size)
-#define ioremap_wc			ioremap_nocache
-#define ioremap_wt			ioremap_nocache
-
 void __iomem *ioremap(unsigned long offset, unsigned long size);
 void iounmap(volatile void __iomem *addr);

@@ -267,7 +267,7 @@ unsigned long long poke_real_address_q(unsigned long long addr,
 #ifdef CONFIG_MMU
 void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
 			       pgprot_t prot, void *caller);
-void __iounmap(void __iomem *addr);
+void iounmap(void __iomem *addr);

 static inline void __iomem *
 __ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot)
@@ -328,7 +328,7 @@ __ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot)
 #else
 #define __ioremap(offset, size, prot)		((void __iomem *)(offset))
 #define __ioremap_mode(offset, size, prot)	((void __iomem *)(offset))
-#define __iounmap(addr)				do { } while (0)
+#define iounmap(addr)				do { } while (0)
 #endif /* CONFIG_MMU */

 static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
@@ -370,11 +370,6 @@ static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
 #define ioremap_nocache	ioremap
 #define ioremap_uc	ioremap

-static inline void iounmap(void __iomem *addr)
-{
-	__iounmap(addr);
-}
-
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
  * access
@@ -103,7 +103,7 @@ static inline int iomapping_nontranslatable(unsigned long offset)
 	return 0;
 }

-void __iounmap(void __iomem *addr)
+void iounmap(void __iomem *addr)
 {
 	unsigned long vaddr = (unsigned long __force)addr;
 	struct vm_struct *p;
@@ -134,4 +134,4 @@ void __iounmap(void __iomem *addr)

 	kfree(p);
 }
-EXPORT_SYMBOL(__iounmap);
+EXPORT_SYMBOL(iounmap);
@@ -127,6 +127,7 @@ static inline void sbus_memcpy_toio(volatile void __iomem *dst,
 * Bus number may be embedded in the higher bits of the physical address.
 * This is why we have no bus number argument to ioremap().
 */
 void __iomem *ioremap(phys_addr_t offset, size_t size);
+void iounmap(volatile void __iomem *addr);
 /* Create a virtual mapping cookie for an IO port range */
 void __iomem *ioport_map(unsigned long port, unsigned int nr);
@@ -18,10 +18,9 @@
 #include <asm-generic/io.h>

 /*
- * __uc32_ioremap and __uc32_ioremap_cached takes CPU physical address.
+ * __uc32_ioremap takes CPU physical address.
 */
 extern void __iomem *__uc32_ioremap(unsigned long, size_t);
-extern void __iomem *__uc32_ioremap_cached(unsigned long, size_t);
 extern void __uc32_iounmap(volatile void __iomem *addr);

 /*
@@ -32,7 +31,6 @@ extern void __uc32_iounmap(volatile void __iomem *addr);
 *
 */
 #define ioremap(cookie, size)		__uc32_ioremap(cookie, size)
-#define ioremap_cached(cookie, size)	__uc32_ioremap_cached(cookie, size)
 #define ioremap_nocache(cookie, size)	__uc32_ioremap(cookie, size)
 #define iounmap(cookie)			__uc32_iounmap(cookie)

@@ -220,14 +220,6 @@ __uc32_ioremap(unsigned long phys_addr, size_t size)
 }
 EXPORT_SYMBOL(__uc32_ioremap);

-void __iomem *
-__uc32_ioremap_cached(unsigned long phys_addr, size_t size)
-{
-	return __uc32_ioremap_caller(phys_addr, size, MT_DEVICE_CACHED,
-				     __builtin_return_address(0));
-}
-EXPORT_SYMBOL(__uc32_ioremap_cached);
-
 void __uc32_iounmap(volatile void __iomem *io_addr)
 {
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
@@ -180,8 +180,6 @@ static inline unsigned int isa_virt_to_bus(volatile void *address)
 * The default ioremap() behavior is non-cached; if you need something
 * else, you probably want one of the following.
 */
-extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
-#define ioremap_nocache ioremap_nocache
 extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size);
 #define ioremap_uc ioremap_uc
 extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
@@ -205,10 +203,7 @@ extern void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long
 * If the area you are trying to map is a PCI BAR you should have a
 * look at pci_iomap().
 */
-static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
-{
-	return ioremap_nocache(offset, size);
-}
+void __iomem *ioremap(resource_size_t offset, unsigned long size);
 #define ioremap ioremap

 extern void iounmap(volatile void __iomem *addr);
@@ -280,11 +280,11 @@ err_free_memtype:
 }

 /**
- * ioremap_nocache - map bus memory into CPU space
+ * ioremap - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
- * ioremap_nocache performs a platform specific sequence of operations to
+ * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
@@ -300,7 +300,7 @@ err_free_memtype:
 *
 * Must be freed with iounmap.
 */
-void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
+void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
 {
 	/*
 	 * Ideally, this should be:
@@ -315,7 +315,7 @@ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 	return __ioremap_caller(phys_addr, size, pcm,
 				__builtin_return_address(0), false);
 }
-EXPORT_SYMBOL(ioremap_nocache);
+EXPORT_SYMBOL(ioremap);

 /**
 * ioremap_uc - map bus memory into CPU space as strongly uncachable
@@ -1784,7 +1784,7 @@ static inline int cpa_clear_pages_array(struct page **pages, int numpages,
 int _set_memory_uc(unsigned long addr, int numpages)
 {
 	/*
-	 * for now UC MINUS. see comments in ioremap_nocache()
+	 * for now UC MINUS. see comments in ioremap()
 	 * If you really need strong UC use ioremap_uc(), but note
 	 * that you cannot override IO areas with set_memory_*() as
 	 * these helpers cannot work with IO memory.
@@ -1799,7 +1799,7 @@ int set_memory_uc(unsigned long addr, int numpages)
 	int ret;

 	/*
-	 * for now UC MINUS. see comments in ioremap_nocache()
+	 * for now UC MINUS. see comments in ioremap()
 	 */
 	ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
 			      _PAGE_CACHE_MODE_UC_MINUS, NULL);
@@ -32,8 +32,7 @@ void xtensa_iounmap(volatile void __iomem *addr);
 /*
 * Return the virtual address for the specified bus memory.
 */
-static inline void __iomem *ioremap_nocache(unsigned long offset,
-					    unsigned long size)
+static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
 {
 	if (offset >= XCHAL_KIO_PADDR
 	    && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
@@ -52,15 +51,6 @@ static inline void __iomem *ioremap_cache(unsigned long offset,
 	return xtensa_ioremap_cache(offset, size);
 }
 #define ioremap_cache ioremap_cache
-#define ioremap_nocache ioremap_nocache

-#define ioremap_wc ioremap_nocache
-#define ioremap_wt ioremap_nocache
-
-static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
-{
-	return ioremap_nocache(offset, size);
-}
-
 static inline void iounmap(volatile void __iomem *addr)
 {
@@ -922,39 +922,17 @@ static inline void *phys_to_virt(unsigned long address)
 /**
 * DOC: ioremap() and ioremap_*() variants
 *
- * If you have an IOMMU your architecture is expected to have both ioremap()
- * and iounmap() implemented otherwise the asm-generic helpers will provide a
- * direct mapping.
+ * Architectures with an MMU are expected to provide ioremap() and iounmap()
+ * themselves or rely on GENERIC_IOREMAP. For NOMMU architectures we provide
+ * a default nop-op implementation that expect that the physical address used
+ * for MMIO are already marked as uncached, and can be used as kernel virtual
+ * addresses.
 *
- * There are ioremap_*() call variants, if you have no IOMMU we naturally will
- * default to direct mapping for all of them, you can override these defaults.
- * If you have an IOMMU you are highly encouraged to provide your own
- * ioremap variant implementation as there currently is no safe architecture
- * agnostic default. To avoid possible improper behaviour default asm-generic
- * ioremap_*() variants all return NULL when an IOMMU is available. If you've
- * defined your own ioremap_*() variant you must then declare your own
- * ioremap_*() variant as defined to itself to avoid the default NULL return.
+ * ioremap_wc() and ioremap_wt() can provide more relaxed caching attributes
+ * for specific drivers if the architecture choses to implement them. If they
+ * are not implemented we fall back to plain ioremap.
 */
-
-#ifdef CONFIG_MMU
-
-#ifndef ioremap_uc
-#define ioremap_uc ioremap_uc
-static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
-{
-	return NULL;
-}
-#endif
-
-#else /* !CONFIG_MMU */
-
-/*
- * Change "struct page" to physical address.
- *
- * This implementation is for the no-MMU case only... if you have an MMU
- * you'll need to provide your own definitions.
- */
-
+#ifndef CONFIG_MMU
 #ifndef ioremap
 #define ioremap ioremap
 static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
@@ -965,42 +943,47 @@ static inline void __iomem *ioremap(phys_addr_t offset, size_t size)

 #ifndef iounmap
 #define iounmap iounmap
-
 static inline void iounmap(void __iomem *addr)
 {
 }
 #endif
-#endif /* CONFIG_MMU */
-
-#ifndef ioremap_nocache
-void __iomem *ioremap(phys_addr_t phys_addr, size_t size);
-#define ioremap_nocache ioremap_nocache
-static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size)
+#elif defined(CONFIG_GENERIC_IOREMAP)
+#include <asm/pgtable.h>
+
+void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot);
+void iounmap(volatile void __iomem *addr);
+
+static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
 {
-	return ioremap(offset, size);
+	/* _PAGE_IOREMAP needs to be supplied by the architecture */
+	return ioremap_prot(addr, size, _PAGE_IOREMAP);
 }
-#endif
+#endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */
+
+#ifndef ioremap_nocache
+#define ioremap_nocache ioremap
+#endif
+
+#ifndef ioremap_wc
+#define ioremap_wc ioremap
+#endif
+
+#ifndef ioremap_wt
+#define ioremap_wt ioremap
+#endif
+
+/*
+ * ioremap_uc is special in that we do require an explicit architecture
+ * implementation. In general you do not want to use this function in a
+ * driver and use plain ioremap, which is uncached by default. Similarly
+ * architectures should not implement it unless they have a very good
+ * reason.
+ */

 #ifndef ioremap_uc
 #define ioremap_uc ioremap_uc
 static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
 {
-	return ioremap_nocache(offset, size);
+	return NULL;
 }
 #endif

-#ifndef ioremap_wc
-#define ioremap_wc ioremap_wc
-static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
-{
-	return ioremap_nocache(offset, size);
-}
-#endif
-
-#ifndef ioremap_wt
-#define ioremap_wt ioremap_wt
-static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
-{
-	return ioremap_nocache(offset, size);
-}
-#endif
-
@@ -640,6 +640,9 @@ config STRING_SELFTEST

 endmenu

+config GENERIC_IOREMAP
+	bool
+
 config GENERIC_LIB_ASHLDI3
 	bool

@@ -231,3 +231,42 @@ int ioremap_page_range(unsigned long addr,

 	return err;
 }
+
+#ifdef CONFIG_GENERIC_IOREMAP
+void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
+{
+	unsigned long offset, vaddr;
+	phys_addr_t last_addr;
+	struct vm_struct *area;
+
+	/* Disallow wrap-around or zero size */
+	last_addr = addr + size - 1;
+	if (!size || last_addr < addr)
+		return NULL;
+
+	/* Page-align mappings */
+	offset = addr & (~PAGE_MASK);
+	addr -= offset;
+	size = PAGE_ALIGN(size + offset);
+
+	area = get_vm_area_caller(size, VM_IOREMAP,
+			__builtin_return_address(0));
+	if (!area)
+		return NULL;
+	vaddr = (unsigned long)area->addr;
+
+	if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
+		free_vm_area(area);
+		return NULL;
+	}
+
+	return (void __iomem *)(vaddr + offset);
+}
+EXPORT_SYMBOL(ioremap_prot);
+
+void iounmap(volatile void __iomem *addr)
+{
+	vunmap((void *)((unsigned long)addr & PAGE_MASK));
+}
+EXPORT_SYMBOL(iounmap);
+#endif /* CONFIG_GENERIC_IOREMAP */
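Driver-facing behaviour is unchanged by the conversion: on an architecture using the generic code, a plain ioremap() call now lands in the ioremap_prot() implementation above. A minimal usage sketch, with made-up device addresses and register offset for illustration:

	#include <linux/io.h>

	#define MYDEV_PHYS	0x10000000UL	/* hypothetical MMIO base */
	#define MYDEV_SIZE	0x1000
	#define MYDEV_CTRL	0x04		/* hypothetical control register offset */

	static int mydev_init(void)
	{
		void __iomem *regs;

		/* map the physical range uncached, get a kernel virtual address */
		regs = ioremap(MYDEV_PHYS, MYDEV_SIZE);
		if (!regs)
			return -ENOMEM;

		writel(0x1, regs + MYDEV_CTRL);	/* MMIO access through the io helpers */

		iounmap(regs);	/* every ioremap() must be balanced by iounmap() */
		return 0;
	}

Note the offset handling in ioremap_prot(): a request that is not page-aligned still works, because the low bits of the physical address are added back onto the page-aligned vmap address before it is returned, and iounmap() masks them off again before calling vunmap().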