libnvdimm/pfn: Account for PAGE_SIZE > info-block-size in nd_pfn_init()
Similar to "libnvdimm: Fix altmap reservation size calculation" provide for a reservation of a full page worth of info block space at info-block establishment time. Typically there is already slack in the padding from honoring the default 2MB alignment, but provide for a reservation for corner case configurations that would otherwise fit. Cc: Oliver O'Halloran <oohall@gmail.com> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
commit 11a358109e
parent 07464e8836
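For reference, a minimal userspace sketch (not part of the commit) of the sizing rule the new info_block_reserve() helper applies: the 8K info block is rounded up to a full page, so a configuration with PAGE_SIZE larger than 8K (e.g. 64K pages) reserves a whole page rather than just 8K. SZ_8K and ALIGN() mirror the kernel definitions; the page-size values are illustrative stand-ins, not probed from a running system.

/*
 * Standalone sketch (userspace, illustration only) of the sizing logic
 * the patch introduces.
 */
#include <stdio.h>

#define SZ_8K		(8UL << 10)
/* ALIGN() as in the kernel: round x up to a multiple of a (a is a power of two) */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Reserve a full page worth of info-block space (never less than 8K). */
static unsigned long info_block_reserve(unsigned long page_size)
{
	return ALIGN(SZ_8K, page_size);
}

int main(void)
{
	unsigned long page_sizes[] = { 4096, 16384, 65536 };

	for (unsigned int i = 0; i < sizeof(page_sizes) / sizeof(page_sizes[0]); i++) {
		unsigned long ps = page_sizes[i];
		unsigned long bytes = info_block_reserve(ps);

		/* init_altmap_reserve() wants the same quantity in pages */
		printf("PAGE_SIZE %6lu: reserve %6lu bytes (%lu pages)\n",
		       ps, bytes, bytes / ps);
	}
	return 0;
}

init_altmap_reserve() consumes the same quantity converted to pages (info_block_reserve() >> PAGE_SHIFT), which the bytes-to-pages division above mimics.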
@@ -580,6 +580,11 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
 }
 EXPORT_SYMBOL(nd_pfn_probe);
 
+static u32 info_block_reserve(void)
+{
+	return ALIGN(SZ_8K, PAGE_SIZE);
+}
+
 /*
  * We hotplug memory at section granularity, pad the reserved area from
  * the previous section base to the namespace base address.
@@ -593,7 +598,7 @@ static unsigned long init_altmap_base(resource_size_t base)
 
 static unsigned long init_altmap_reserve(resource_size_t base)
 {
-	unsigned long reserve = PFN_UP(SZ_8K);
+	unsigned long reserve = info_block_reserve() >> PAGE_SHIFT;
 	unsigned long base_pfn = PHYS_PFN(base);
 
 	reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
@@ -608,6 +613,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
 	u64 offset = le64_to_cpu(pfn_sb->dataoff);
 	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
 	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+	u32 reserve = info_block_reserve();
 	struct nd_namespace_common *ndns = nd_pfn->ndns;
 	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
 	resource_size_t base = nsio->res.start + start_pad;
@@ -621,7 +627,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
 	res->end -= end_trunc;
 
 	if (nd_pfn->mode == PFN_MODE_RAM) {
-		if (offset < SZ_8K)
+		if (offset < reserve)
 			return -EINVAL;
 		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
 		pgmap->altmap_valid = false;
@@ -634,7 +640,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
 				le64_to_cpu(nd_pfn->pfn_sb->npfns),
 				nd_pfn->npfns);
 		memcpy(altmap, &__altmap, sizeof(*altmap));
-		altmap->free = PHYS_PFN(offset - SZ_8K);
+		altmap->free = PHYS_PFN(offset - reserve);
 		altmap->alloc = 0;
 		pgmap->altmap_valid = true;
 	} else
@@ -687,9 +693,9 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 	u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
 	struct nd_namespace_common *ndns = nd_pfn->ndns;
 	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+	u32 start_pad, end_trunc, reserve = info_block_reserve();
 	resource_size_t start, size;
 	struct nd_region *nd_region;
-	u32 start_pad, end_trunc;
 	struct nd_pfn_sb *pfn_sb;
 	unsigned long npfns;
 	phys_addr_t offset;
@@ -734,7 +740,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 	 */
 	start = nsio->res.start + start_pad;
 	size = resource_size(&nsio->res);
-	npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K)
+	npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - reserve)
 			/ PAGE_SIZE);
 	if (nd_pfn->mode == PFN_MODE_PMEM) {
 		/*
@@ -742,10 +748,10 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 		 * when populating the vmemmap. This *should* be equal to
 		 * PMD_SIZE for most architectures.
 		 */
-		offset = ALIGN(start + SZ_8K + 64 * npfns + dax_label_reserve,
+		offset = ALIGN(start + reserve + 64 * npfns + dax_label_reserve,
 				max(nd_pfn->align, PMD_SIZE)) - start;
 	} else if (nd_pfn->mode == PFN_MODE_RAM)
-		offset = ALIGN(start + SZ_8K + dax_label_reserve,
+		offset = ALIGN(start + reserve + dax_label_reserve,
 				nd_pfn->align) - start;
 	else
 		return -ENXIO;
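A rough worked example (again userspace-only, with assumed values for the section size, namespace size and alignment) of the nd_pfn_init() layout math in PFN_MODE_PMEM after this change: the page-aligned reserve plus 64 bytes of struct page space per pfn determine where the data region starts.

/*
 * Illustrative sketch of the patched nd_pfn_init() PMEM-mode layout
 * math.  Section size (128M), namespace size (16G), 4K pages and the
 * 2M alignment are example values, not probed from hardware.
 */
#include <stdio.h>

#define SZ_8K		(8UL << 10)
#define SZ_2M		(2UL << 20)
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long page_size = 4096;
	unsigned long section_pfns = (128UL << 20) / page_size;	/* 128M sections */
	unsigned long size = 16UL << 30;			/* 16G namespace */
	unsigned long start = 0, start_pad = 0, end_trunc = 0;
	unsigned long reserve = ALIGN(SZ_8K, page_size);	/* info_block_reserve() */

	/* npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - reserve) / PAGE_SIZE) */
	unsigned long npfns = ALIGN((size - start_pad - end_trunc - reserve) / page_size,
				    section_pfns);

	/* offset = ALIGN(start + reserve + 64 * npfns, max(align, PMD_SIZE)) - start */
	unsigned long offset = ALIGN(start + reserve + 64 * npfns, SZ_2M) - start;

	printf("npfns = %lu, data offset = %lu bytes (%lu MiB)\n",
	       npfns, offset, offset >> 20);
	return 0;
}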