linux/drivers/of/of_reserved_mem.c

// SPDX-License-Identifier: GPL-2.0+
/*
* Device tree based initialization code for reserved memory.
*
* Copyright (c) 2013, 2015 The Linux Foundation. All Rights Reserved.
* Copyright (c) 2013,2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com
* Author: Marek Szyprowski <m.szyprowski@samsung.com>
* Author: Josh Cartwright <joshc@codeaurora.org>
*/
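/*
 * Illustrative sketch of the binding this file implements; the node and
 * label names below are hypothetical, see
 * Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
 * for the authoritative description.  Regions with a fixed 'reg' are
 * reserved as-is by the early FDT code; regions with only 'size' (plus
 * optional 'alignment' and 'alloc-ranges') are allocated dynamically by
 * __reserved_mem_alloc_size() below.
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		fb_mem: framebuffer@78000000 {
 *			reg = <0x78000000 0x800000>;
 *			no-map;
 *		};
 *
 *		pool: dma-pool {
 *			compatible = "shared-dma-pool";
 *			reusable;
 *			size = <0x4000000>;
 *			alignment = <0x400000>;
 *			alloc-ranges = <0x40000000 0x30000000>;
 *		};
 *	};
 *
 *	video@12340000 {
 *		memory-region = <&fb_mem>;
 *	};
 */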
#define pr_fmt(fmt) "OF: reserved mem: " fmt
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/of_reserved_mem.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#define MAX_RESERVED_REGIONS 32
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
static int reserved_mem_count;
static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
phys_addr_t *res_base)
{
phys_addr_t base;
end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
align = !align ? SMP_CACHE_BYTES : align;
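	/*
	 * Use memblock_find_in_range() rather than a memblock_phys_alloc*()
	 * variant here: a 'no-map' region is immediately memblock_remove()'d
	 * below, and memblock_remove() is not kmemleak-aware, so the range
	 * must not be registered with kmemleak by the allocator.
	 */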
base = memblock_find_in_range(start, end, size, align);
if (!base)
return -ENOMEM;
*res_base = base;
if (nomap)
return memblock_remove(base, size);
return memblock_reserve(base, size);
}
/**
* fdt_reserved_mem_save_node() - save fdt node for second pass initialization
*/
void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
phys_addr_t base, phys_addr_t size)
{
struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];
if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) {
pr_err("not enough space all defined regions.\n");
return;
}
rmem->fdt_node = node;
rmem->name = uname;
rmem->base = base;
rmem->size = size;
reserved_mem_count++;
return;
}
/**
* __reserved_mem_alloc_size() - allocate reserved memory described by 'size', 'align'
* and 'alloc-ranges' properties
*/
static int __init __reserved_mem_alloc_size(unsigned long node,
const char *uname, phys_addr_t *res_base, phys_addr_t *res_size)
{
int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
phys_addr_t start = 0, end = 0;
phys_addr_t base = 0, align = 0, size;
int len;
const __be32 *prop;
int nomap;
int ret;
prop = of_get_flat_dt_prop(node, "size", &len);
if (!prop)
return -EINVAL;
if (len != dt_root_size_cells * sizeof(__be32)) {
pr_err("invalid size property in '%s' node.\n", uname);
return -EINVAL;
}
size = dt_mem_next_cell(dt_root_size_cells, &prop);
nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
prop = of_get_flat_dt_prop(node, "alignment", &len);
if (prop) {
if (len != dt_root_addr_cells * sizeof(__be32)) {
pr_err("invalid alignment property in '%s' node.\n",
uname);
return -EINVAL;
}
align = dt_mem_next_cell(dt_root_addr_cells, &prop);
}
/* Need to adjust the alignment to satisfy the CMA requirement */
if (IS_ENABLED(CONFIG_CMA)
&& of_flat_dt_is_compatible(node, "shared-dma-pool")
&& of_get_flat_dt_prop(node, "reusable", NULL)
&& !of_get_flat_dt_prop(node, "no-map", NULL)) {
unsigned long order =
max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
align = max(align, (phys_addr_t)PAGE_SIZE << order);
}
prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
if (prop) {
if (len % t_len != 0) {
pr_err("invalid alloc-ranges property in '%s', skipping node.\n",
uname);
return -EINVAL;
}
base = 0;
while (len > 0) {
start = dt_mem_next_cell(dt_root_addr_cells, &prop);
end = start + dt_mem_next_cell(dt_root_size_cells,
&prop);
ret = early_init_dt_alloc_reserved_memory_arch(size,
align, start, end, nomap, &base);
if (ret == 0) {
pr_debug("allocated memory for '%s' node: base %pa, size %ld MiB\n",
uname, &base,
(unsigned long)size / SZ_1M);
break;
}
len -= t_len;
}
} else {
ret = early_init_dt_alloc_reserved_memory_arch(size, align,
0, 0, nomap, &base);
if (ret == 0)
pr_debug("allocated memory for '%s' node: base %pa, size %ld MiB\n",
uname, &base, (unsigned long)size / SZ_1M);
}
if (base == 0) {
pr_info("failed to allocate memory for node '%s'\n", uname);
return -ENOMEM;
}
*res_base = base;
*res_size = size;
return 0;
}
static const struct of_device_id __rmem_of_table_sentinel
__used __section(__reservedmem_of_table_end);
/**
* __reserved_mem_init_node() - call region specific reserved memory init code
*/
static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
{
extern const struct of_device_id __reservedmem_of_table[];
const struct of_device_id *i;
int ret = -ENOENT;
for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
reservedmem_of_init_fn initfn = i->data;
const char *compat = i->compatible;
if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
continue;
ret = initfn(rmem);
if (ret == 0) {
pr_info("initialized node %s, compatible id %s\n",
rmem->name, compat);
break;
}
}
return ret;
}
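/*
 * Illustrative sketch of how entries land in __reservedmem_of_table (the
 * names here are hypothetical, not part of this file): a handler registers
 * itself with RESERVEDMEM_OF_DECLARE() and is matched by compatible string
 * in __reserved_mem_init_node() above.
 *
 *	static int __init rmem_foo_setup(struct reserved_mem *rmem)
 *	{
 *		pr_info("foo pool at %pa, size %pa\n", &rmem->base, &rmem->size);
 *		rmem->ops = &rmem_foo_ops;
 *		return 0;
 *	}
 *	RESERVEDMEM_OF_DECLARE(foo, "vendor,foo-pool", rmem_foo_setup);
 *
 * The in-tree CMA and coherent-pool code registers its handlers for
 * "shared-dma-pool" nodes in exactly this way.
 */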
static int __init __rmem_cmp(const void *a, const void *b)
{
const struct reserved_mem *ra = a, *rb = b;
if (ra->base < rb->base)
return -1;
if (ra->base > rb->base)
return 1;
return 0;
}
static void __init __rmem_check_for_overlap(void)
{
int i;
if (reserved_mem_count < 2)
return;
sort(reserved_mem, reserved_mem_count, sizeof(reserved_mem[0]),
__rmem_cmp, NULL);
for (i = 0; i < reserved_mem_count - 1; i++) {
struct reserved_mem *this, *next;
this = &reserved_mem[i];
next = &reserved_mem[i + 1];
if (!(this->base && next->base))
continue;
if (this->base + this->size > next->base) {
phys_addr_t this_end, next_end;
this_end = this->base + this->size;
next_end = next->base + next->size;
pr_err("OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n",
this->name, &this->base, &this_end,
next->name, &next->base, &next_end);
}
}
}
/**
* fdt_init_reserved_mem - allocate and init all saved reserved memory regions
*/
void __init fdt_init_reserved_mem(void)
{
int i;
/* check for overlapping reserved regions */
__rmem_check_for_overlap();
for (i = 0; i < reserved_mem_count; i++) {
struct reserved_mem *rmem = &reserved_mem[i];
unsigned long node = rmem->fdt_node;
int len;
const __be32 *prop;
int err = 0;
int nomap;
nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
prop = of_get_flat_dt_prop(node, "phandle", &len);
if (!prop)
prop = of_get_flat_dt_prop(node, "linux,phandle", &len);
if (prop)
rmem->phandle = of_read_number(prop, len/4);
if (rmem->size == 0)
err = __reserved_mem_alloc_size(node, rmem->name,
&rmem->base, &rmem->size);
if (err == 0) {
err = __reserved_mem_init_node(rmem);
if (err != 0 && err != -ENOENT) {
pr_info("node %s compatible matching fail\n",
rmem->name);
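			/*
			 * Roll back the memblock state: drop the reservation
			 * and, for 'no-map' regions, add the memory back so
			 * that it is usable again.
			 */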
memblock_free(rmem->base, rmem->size);
if (nomap)
memblock_add(rmem->base, rmem->size);
}
}
}
}
static inline struct reserved_mem *__find_rmem(struct device_node *node)
{
unsigned int i;
if (!node->phandle)
return NULL;
for (i = 0; i < reserved_mem_count; i++)
if (reserved_mem[i].phandle == node->phandle)
return &reserved_mem[i];
return NULL;
}
struct rmem_assigned_device {
struct device *dev;
struct reserved_mem *rmem;
struct list_head list;
};
static LIST_HEAD(of_rmem_assigned_device_list);
static DEFINE_MUTEX(of_rmem_assigned_device_mutex);
/**
* of_reserved_mem_device_init_by_idx() - assign reserved memory region to
* given device
* @dev: Pointer to the device to configure
* @np: Pointer to the device_node with 'memory-region' property
* @idx: Index of selected region
*
* This function assigns the respective DMA-mapping operations, based on the
* reserved memory region specified by the 'memory-region' property in the
* @np node, to the @dev device. When a driver needs to use more than one
* reserved memory region, it should allocate child devices and initialize a
* region by name for each child device.
*
* Returns error code or zero on success.
*/
int of_reserved_mem_device_init_by_idx(struct device *dev,
struct device_node *np, int idx)
{
struct rmem_assigned_device *rd;
struct device_node *target;
struct reserved_mem *rmem;
int ret;
if (!np || !dev)
return -EINVAL;
target = of_parse_phandle(np, "memory-region", idx);
if (!target)
return -ENODEV;
if (!of_device_is_available(target))
return 0;
rmem = __find_rmem(target);
of_node_put(target);
if (!rmem || !rmem->ops || !rmem->ops->device_init)
return -EINVAL;
rd = kmalloc(sizeof(struct rmem_assigned_device), GFP_KERNEL);
if (!rd)
return -ENOMEM;
ret = rmem->ops->device_init(rmem, dev);
if (ret == 0) {
rd->dev = dev;
rd->rmem = rmem;
mutex_lock(&of_rmem_assigned_device_mutex);
list_add(&rd->list, &of_rmem_assigned_device_list);
mutex_unlock(&of_rmem_assigned_device_mutex);
dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
} else {
kfree(rd);
}
return ret;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_idx);
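/*
 * Illustrative usage sketch (the driver names are hypothetical): a platform
 * driver whose node carries a 'memory-region' phandle typically assigns the
 * region from probe and drops it with of_reserved_mem_device_release() from
 * remove.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = of_reserved_mem_device_init_by_idx(&pdev->dev,
 *							 pdev->dev.of_node, 0);
 *		if (ret && ret != -ENODEV)
 *			return ret;
 *		return 0;
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		of_reserved_mem_device_release(&pdev->dev);
 *		return 0;
 *	}
 */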
/**
* of_reserved_mem_device_release() - release reserved memory device structures
* @dev: Pointer to the device to deconfigure
*
* This function releases structures allocated for memory region handling for
* the given device.
*/
void of_reserved_mem_device_release(struct device *dev)
{
struct rmem_assigned_device *rd;
struct reserved_mem *rmem = NULL;
mutex_lock(&of_rmem_assigned_device_mutex);
list_for_each_entry(rd, &of_rmem_assigned_device_list, list) {
if (rd->dev == dev) {
rmem = rd->rmem;
list_del(&rd->list);
kfree(rd);
break;
}
}
mutex_unlock(&of_rmem_assigned_device_mutex);
if (!rmem || !rmem->ops || !rmem->ops->device_release)
return;
rmem->ops->device_release(rmem, dev);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);
/**
* of_reserved_mem_lookup() - acquire reserved_mem from a device node
* @np: node pointer of the desired reserved-memory region
*
* This function allows drivers to acquire a reference to the reserved_mem
* struct based on a device node handle.
*
* Returns a reserved_mem reference, or NULL on error.
*/
struct reserved_mem *of_reserved_mem_lookup(struct device_node *np)
{
const char *name;
int i;
if (!np->full_name)
return NULL;
name = kbasename(np->full_name);
for (i = 0; i < reserved_mem_count; i++)
if (!strcmp(reserved_mem[i].name, name))
return &reserved_mem[i];
return NULL;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_lookup);
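/*
 * Illustrative usage sketch (hypothetical driver code): resolve a
 * 'memory-region' phandle and use the region's base/size directly, e.g.
 * for a firmware carveout that the driver maps itself.
 *
 *	struct device_node *np;
 *	struct reserved_mem *rmem;
 *
 *	np = of_parse_phandle(dev->of_node, "memory-region", 0);
 *	rmem = np ? of_reserved_mem_lookup(np) : NULL;
 *	of_node_put(np);
 *	if (!rmem)
 *		return -ENODEV;
 *	dev_info(dev, "carveout at %pa, size %pa\n", &rmem->base, &rmem->size);
 */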