ARM: Add caller information to ioremap
This allows the procfs vmallocinfo file to show who created the ioremap regions.

Note: __builtin_return_address(0) doesn't do what's expected if it's used in an inline function, so we leave __arm_ioremap callers in such places alone.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
commit 31aa8fd6fd (parent 7284ce6c9f)
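For readers unfamiliar with the pattern this commit introduces, here is a minimal userspace sketch (not kernel code; mock_vm_area, mock_get_vm_area_caller, fake_ioremap_caller and fake_ioremap are made-up names). It only illustrates the shape of the change: the core routine takes an explicit void *caller and records it, while a thin non-inline wrapper supplies __builtin_return_address(0) so the recorded address points into whoever called the wrapper.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the kernel structures and helpers. */
struct mock_vm_area {
	void *addr;
	size_t size;
	void *caller;		/* mirrors the slot get_vm_area_caller() fills */
};

static struct mock_vm_area last_area;

static void *mock_get_vm_area_caller(size_t size, void *caller)
{
	last_area.addr = malloc(size);
	last_area.size = size;
	last_area.caller = caller;	/* remembered for later reporting */
	return last_area.addr;
}

/* Core routine: takes the caller explicitly, like __arm_ioremap_caller(). */
static void *fake_ioremap_caller(size_t size, void *caller)
{
	return mock_get_vm_area_caller(size, caller);
}

/* Thin, non-inline wrapper: supplies its own return address, so the
 * recorded pointer lands inside whoever called fake_ioremap(). */
__attribute__((noinline)) void *fake_ioremap(size_t size)
{
	return fake_ioremap_caller(size, __builtin_return_address(0));
}

int main(void)
{
	fake_ioremap(4096);
	/* In the kernel, this is roughly the information /proc/vmallocinfo
	 * later prints, with the raw pointer resolved to a symbol name. */
	printf("%p %zu caller=%p\n", last_area.addr, last_area.size,
	       last_area.caller);
	free(last_area.addr);
	return 0;
}

Keeping the wrapper out of line is what makes the recorded address meaningful, which is also why the commit leaves __arm_ioremap calls inside inline functions untouched.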
@@ -69,9 +69,16 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
 /*
  * __arm_ioremap takes CPU physical address.
  * __arm_ioremap_pfn takes a Page Frame Number and an offset into that page
+ * The _caller variety takes a __builtin_return_address(0) value for
+ * /proc/vmalloc to use - and should only be used in non-inline functions.
  */
-extern void __iomem * __arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
-extern void __iomem * __arm_ioremap(unsigned long, size_t, unsigned int);
+extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
+	size_t, unsigned int, void *);
+extern void __iomem *__arm_ioremap_caller(unsigned long, size_t, unsigned int,
+	void *);
+
+extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
+extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int);
 extern void __iounmap(volatile void __iomem *addr);
 
 /*
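The new comment's warning about inline functions can be seen with a small, self-contained demo (illustrative only; who_called_inline, who_called_noinline and probe are made-up names, and the exact effect depends on the compiler and optimisation settings): once a function body is inlined, __builtin_return_address(0) evaluates in the function it was inlined into, so it reports a frame one level further up the call chain than intended.

#include <stdio.h>

/* Made-up helpers, for illustration only. */
static inline __attribute__((always_inline)) void *who_called_inline(void)
{
	/* This body is pasted into its caller, so the "return address"
	 * observed here belongs to that caller's own frame: it points one
	 * level further up the call chain than intended. */
	return __builtin_return_address(0);
}

static __attribute__((noinline)) void *who_called_noinline(void)
{
	/* A real stack frame exists, so this is an address inside the
	 * immediate caller, which is what the _caller bookkeeping wants. */
	return __builtin_return_address(0);
}

static __attribute__((noinline)) void probe(void)
{
	printf("inlined helper reports    %p\n", who_called_inline());
	printf("non-inline helper reports %p\n", who_called_noinline());
}

int main(void)
{
	probe();	/* the two pointers printed will generally differ */
	return 0;
}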
@@ -24,7 +24,7 @@ void __iomem *davinci_ioremap(unsigned long p, size_t size, unsigned int type)
 	if (BETWEEN(p, IO_PHYS, IO_SIZE))
 		return XLATE(p, IO_PHYS, IO_VIRT);
 
-	return __arm_ioremap(p, size, type);
+	return __arm_ioremap_caller(p, size, type, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(davinci_ioremap);
 
@@ -61,9 +61,9 @@ void * __iomem __iop13xx_ioremap(unsigned long cookie, size_t size,
 				(cookie - IOP13XX_PCIE_LOWER_MEM_RA));
 		break;
 	case IOP13XX_PBI_LOWER_MEM_RA ... IOP13XX_PBI_UPPER_MEM_RA:
-		retval = __arm_ioremap(IOP13XX_PBI_LOWER_MEM_PA +
+		retval = __arm_ioremap_caller(IOP13XX_PBI_LOWER_MEM_PA +
 				(cookie - IOP13XX_PBI_LOWER_MEM_RA),
-				size, mtype);
+				size, mtype, __builtin_return_address(0));
 		break;
 	case IOP13XX_PCIE_LOWER_IO_PA ... IOP13XX_PCIE_UPPER_IO_PA:
 		retval = (void *) IOP13XX_PCIE_IO_PHYS_TO_VIRT(cookie);
@@ -75,7 +75,8 @@ void * __iomem __iop13xx_ioremap(unsigned long cookie, size_t size,
 		retval = (void *) IOP13XX_PMMR_PHYS_TO_VIRT(cookie);
 		break;
 	default:
-		retval = __arm_ioremap(cookie, size, mtype);
+		retval = __arm_ioremap_caller(cookie, size, mtype,
+				__builtin_return_address(0));
 	}
 
 	return retval;
@@ -76,5 +76,6 @@ __msm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
 		mtype = MT_DEVICE_NONSHARED;
 	}
 
-	return __arm_ioremap(phys_addr, size, mtype);
+	return __arm_ioremap_caller(phys_addr, size, mtype,
+			__builtin_return_address(0));
 }
@@ -139,8 +139,8 @@ void __check_kvm_seq(struct mm_struct *mm)
  * which requires the new ioremap'd region to be referenced, the CPU will
  * reference the _old_ region.
  *
- * Note that get_vm_area() allocates a guard 4K page, so we need to mask
- * the size back to 1MB aligned or we will overflow in the loop below.
+ * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
+ * mask the size back to 1MB aligned or we will overflow in the loop below.
  */
 static void unmap_area_sections(unsigned long virt, unsigned long size)
 {
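A small worked example of the masking that comment describes, assuming the usual SZ_1M/SZ_4K constants (the numbers themselves are made up for illustration): the 4K guard page added by get_vm_area_caller() must not be walked as if it were a mapped section, so the size is rounded back down to whole 1MB sections before the unmap loop runs.

#include <stdio.h>

#define SZ_1M	0x00100000UL	/* assumed, matching the kernel's definition */
#define SZ_4K	0x00001000UL

int main(void)
{
	/* e.g. a 2MB section mapping plus the 4K guard page */
	unsigned long size = 2 * SZ_1M + SZ_4K;
	unsigned long walkable = size & ~(SZ_1M - 1);

	printf("raw size 0x%lx -> 1MB-aligned size 0x%lx (%lu sections)\n",
	       size, walkable, walkable / SZ_1M);
	return 0;
}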
@@ -254,22 +254,8 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
 }
 #endif
 
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- *
- * 'flags' are the extra L_PTE_ flags that you want to specify for this
- * mapping. See <asm/pgtable.h> for more information.
- */
-void __iomem *
-__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
-	unsigned int mtype)
+void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
+	unsigned long offset, size_t size, unsigned int mtype, void *caller)
 {
 	const struct mem_type *type;
 	int err;
@@ -291,7 +277,7 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 	 */
 	size = PAGE_ALIGN(offset + size);
 
-	area = get_vm_area(size, VM_IOREMAP);
+	area = get_vm_area_caller(size, VM_IOREMAP, caller);
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;
@@ -318,10 +304,9 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 	flush_cache_vmap(addr, addr + size);
 	return (void __iomem *) (offset + addr);
 }
-EXPORT_SYMBOL(__arm_ioremap_pfn);
 
-void __iomem *
-__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
+void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
+	unsigned int mtype, void *caller)
 {
 	unsigned long last_addr;
 	unsigned long offset = phys_addr & ~PAGE_MASK;
@@ -334,7 +319,33 @@ __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
 	if (!size || last_addr < phys_addr)
 		return NULL;
 
-	return __arm_ioremap_pfn(pfn, offset, size, mtype);
+	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
+			caller);
 }
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void __iomem *
+__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
+		  unsigned int mtype)
+{
+	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
+			__builtin_return_address(0));
+}
+EXPORT_SYMBOL(__arm_ioremap_pfn);
+
+void __iomem *
+__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
+{
+	return __arm_ioremap_caller(phys_addr, size, mtype,
+			__builtin_return_address(0));
+}
 EXPORT_SYMBOL(__arm_ioremap);
 
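Once the caller is recorded, the result is visible from userspace. The helper below is only a convenience sketch: it filters /proc/vmallocinfo for lines mentioning "ioremap" (a plain grep does the same) and assumes a kernel with CONFIG_PROC_FS and sufficient privileges to read the file.

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/vmallocinfo", "r");

	if (!f) {
		perror("/proc/vmallocinfo");
		return 1;
	}
	/* print only the entries created via the ioremap path */
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "ioremap"))
			fputs(line, stdout);
	fclose(f);
	return 0;
}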
@@ -74,6 +74,12 @@ void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
 }
 EXPORT_SYMBOL(__arm_ioremap_pfn);
 
+void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset,
+			   size_t size, unsigned int mtype, void *caller)
+{
+	return __arm_ioremap_pfn(pfn, offset, size, mtype);
+}
+
 void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
 			    unsigned int mtype)
 {
@@ -81,6 +87,12 @@ void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
 }
 EXPORT_SYMBOL(__arm_ioremap);
 
+void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
+			unsigned int mtype, void *caller)
+{
+	return __arm_ioremap(phys_addr, size, mtype);
+}
+
 void __iounmap(volatile void __iomem *addr)
 {
 }
@@ -34,7 +34,8 @@ void * __iomem __iop3xx_ioremap(unsigned long cookie, size_t size,
 		retval = (void *) IOP3XX_PMMR_PHYS_TO_VIRT(cookie);
 		break;
 	default:
-		retval = __arm_ioremap(cookie, size, mtype);
+		retval = __arm_ioremap_caller(cookie, size, mtype,
+			__builtin_return_address(0));
 	}
 
 	return retval;
@@ -128,7 +128,7 @@ void __iomem *omap_ioremap(unsigned long p, size_t size, unsigned int type)
 			return XLATE(p, L4_EMU_44XX_PHYS, L4_EMU_44XX_VIRT);
 	}
 #endif
-	return __arm_ioremap(p, size, type);
+	return __arm_ioremap_caller(p, size, type, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(omap_ioremap);
 