mirror of
https://github.com/torvalds/linux.git
synced 2024-12-22 10:56:40 +00:00
8f4b01fcde
The nvdimm core currently maps the full namespace to an ioremap range while probing the namespace mode. This can result in probe failures on architectures that have limited ioremap space. For example, with a large btt namespace that consumes most of I/O remap range, depending on the sequence of namespace initialization, the user can find a pfn namespace initialization failure due to unavailable I/O remap space which nvdimm core uses for temporary mapping. nvdimm core can avoid this failure by only mapping the reserved info block area to check for pfn superblock type and map the full namespace resource only before using the namespace. Given that personalities like BTT can be layered on top of any namespace type create a generic form of devm_nsio_enable (devm_namespace_enable) and use it inside the per-personality attach routines. Now devm_namespace_enable() is always paired with disable unless the mapping is going to be used for long term runtime access. Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com> Link: https://lore.kernel.org/r/20191017073308.32645-1-aneesh.kumar@linux.ibm.com [djbw: reworks to move devm_namespace_{en,dis}able into *attach helpers] Reported-by: kbuild test robot <lkp@intel.com> Link: https://lore.kernel.org/r/20191031105741.102793-2-aneesh.kumar@linux.ibm.com Signed-off-by: Dan Williams <dan.j.williams@intel.com>
72 lines
2.1 KiB
C
72 lines
2.1 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. */
|
|
#include <linux/memremap.h>
|
|
#include <linux/module.h>
|
|
#include <linux/pfn_t.h>
|
|
#include "../../nvdimm/pfn.h"
|
|
#include "../../nvdimm/nd.h"
|
|
#include "../bus.h"
|
|
|
|
struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
|
|
{
|
|
struct resource res;
|
|
int rc, id, region_id;
|
|
resource_size_t offset;
|
|
struct nd_pfn_sb *pfn_sb;
|
|
struct dev_dax *dev_dax;
|
|
struct nd_namespace_io *nsio;
|
|
struct dax_region *dax_region;
|
|
struct dev_pagemap pgmap = { };
|
|
struct nd_namespace_common *ndns;
|
|
struct nd_dax *nd_dax = to_nd_dax(dev);
|
|
struct nd_pfn *nd_pfn = &nd_dax->nd_pfn;
|
|
struct nd_region *nd_region = to_nd_region(dev->parent);
|
|
|
|
ndns = nvdimm_namespace_common_probe(dev);
|
|
if (IS_ERR(ndns))
|
|
return ERR_CAST(ndns);
|
|
|
|
/* parse the 'pfn' info block via ->rw_bytes */
|
|
rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
|
|
if (rc)
|
|
return ERR_PTR(rc);
|
|
rc = nvdimm_setup_pfn(nd_pfn, &pgmap);
|
|
if (rc)
|
|
return ERR_PTR(rc);
|
|
devm_namespace_disable(dev, ndns);
|
|
|
|
/* reserve the metadata area, device-dax will reserve the data */
|
|
pfn_sb = nd_pfn->pfn_sb;
|
|
offset = le64_to_cpu(pfn_sb->dataoff);
|
|
nsio = to_nd_namespace_io(&ndns->dev);
|
|
if (!devm_request_mem_region(dev, nsio->res.start, offset,
|
|
dev_name(&ndns->dev))) {
|
|
dev_warn(dev, "could not reserve metadata\n");
|
|
return ERR_PTR(-EBUSY);
|
|
}
|
|
|
|
rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", ®ion_id, &id);
|
|
if (rc != 2)
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
/* adjust the dax_region resource to the start of data */
|
|
memcpy(&res, &pgmap.res, sizeof(res));
|
|
res.start += offset;
|
|
dax_region = alloc_dax_region(dev, region_id, &res,
|
|
nd_region->target_node, le32_to_cpu(pfn_sb->align),
|
|
PFN_DEV|PFN_MAP);
|
|
if (!dax_region)
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
dev_dax = __devm_create_dev_dax(dax_region, id, &pgmap, subsys);
|
|
|
|
/* child dev_dax instances now own the lifetime of the dax_region */
|
|
dax_region_put(dax_region);
|
|
|
|
return dev_dax;
|
|
}
|
|
EXPORT_SYMBOL_GPL(__dax_pmem_probe);
|
|
|
|
/* Module metadata consumed by modinfo and the module loader. */
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");