libnvdimm/pfn: stop padding pmem namespaces to section alignment
Now that the mm core supports section-unaligned hotplug of ZONE_DEVICE
memory, we no longer need to add padding at pfn/dax device creation
time. The kernel will still honor padding established by older kernels.

Link: http://lkml.kernel.org/r/156092356588.979959.6793371748950931916.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reported-by: Jeff Moyer <jmoyer@redhat.com>
Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com> [ppc64]
Cc: David Hildenbrand <david@redhat.com>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Toshi Kani <toshi.kani@hpe.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Yang <richardw.yang@linux.intel.com>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit a3619190d6
parent 7e3e888dfc
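For context, a minimal userspace-style sketch of the alignment arithmetic this change removes. The section and sub-section sizes (128MB and 2MB) and the example namespace address are assumptions based on x86-64 defaults, and align_up() is a stand-in helper, not kernel API:

/* Illustrative only: shows why a namespace that does not start on a
 * 128MB section boundary used to require padding, and why 2MB
 * sub-section hotplug makes that padding unnecessary. */
#include <stdio.h>

#define SECTION_SIZE    (128UL << 20)  /* assumed x86-64 section size */
#define SUBSECTION_SIZE (2UL << 20)    /* assumed 2MB sub-sections    */

static unsigned long align_up(unsigned long x, unsigned long a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	unsigned long ns_start = (4UL << 30) + (6UL << 20); /* namespace at 4GB + 6MB */

	/* Old behavior: pad the start up to the next 128MB section boundary. */
	unsigned long section_pad = align_up(ns_start, SECTION_SIZE) - ns_start;
	/* New behavior: only sub-section (2MB) alignment matters, so an
	 * already 2MB-aligned namespace needs no padding at all. */
	unsigned long subsection_pad = align_up(ns_start, SUBSECTION_SIZE) - ns_start;

	printf("section padding:     %lu MB\n", section_pad >> 20);    /* 122 */
	printf("sub-section padding: %lu MB\n", subsection_pad >> 20); /* 0   */
	return 0;
}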
--- a/drivers/nvdimm/pfn.h
+++ b/drivers/nvdimm/pfn.h
@@ -33,18 +33,4 @@ struct nd_pfn_sb {
 	__le64 checksum;
 };
 
-#ifdef CONFIG_SPARSEMEM
-#define PFN_SECTION_ALIGN_DOWN(x) SECTION_ALIGN_DOWN(x)
-#define PFN_SECTION_ALIGN_UP(x) SECTION_ALIGN_UP(x)
-#else
-/*
- * In this case ZONE_DEVICE=n and we will disable 'pfn' device support,
- * but we still want pmem to compile.
- */
-#define PFN_SECTION_ALIGN_DOWN(x) (x)
-#define PFN_SECTION_ALIGN_UP(x) (x)
-#endif
-
-#define PHYS_SECTION_ALIGN_DOWN(x) PFN_PHYS(PFN_SECTION_ALIGN_DOWN(PHYS_PFN(x)))
-#define PHYS_SECTION_ALIGN_UP(x) PFN_PHYS(PFN_SECTION_ALIGN_UP(PHYS_PFN(x)))
 #endif /* __NVDIMM_PFN_H */
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -587,14 +587,14 @@ static u32 info_block_reserve(void)
 }
 
 /*
- * We hotplug memory at section granularity, pad the reserved area from
- * the previous section base to the namespace base address.
+ * We hotplug memory at sub-section granularity, pad the reserved area
+ * from the previous section base to the namespace base address.
  */
 static unsigned long init_altmap_base(resource_size_t base)
 {
 	unsigned long base_pfn = PHYS_PFN(base);
 
-	return PFN_SECTION_ALIGN_DOWN(base_pfn);
+	return SUBSECTION_ALIGN_DOWN(base_pfn);
 }
 
 static unsigned long init_altmap_reserve(resource_size_t base)
@@ -602,7 +602,7 @@ static unsigned long init_altmap_reserve(resource_size_t base)
 	unsigned long reserve = info_block_reserve() >> PAGE_SHIFT;
 	unsigned long base_pfn = PHYS_PFN(base);
 
-	reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
+	reserve += base_pfn - SUBSECTION_ALIGN_DOWN(base_pfn);
 	return reserve;
 }
 
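A rough sketch of what the reworked init_altmap_base()/init_altmap_reserve() now compute. The page size, sub-section size, 8KB info-block size, and example base address are assumptions for illustration; the standalone helpers below only mirror the arithmetic in the patch:

/* Illustrative recomputation of the altmap base and reserve after the
 * switch to sub-section alignment (assumes 4KB pages and 512-page,
 * i.e. 2MB, sub-sections; the 8KB info block size is also assumed). */
#include <stdio.h>

#define PAGE_SHIFT                 12
#define PAGES_PER_SUBSECTION       512UL
#define PAGE_SUBSECTION_MASK       (~(PAGES_PER_SUBSECTION - 1))
#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)

int main(void)
{
	unsigned long long base = 0x100200000ULL; /* namespace base: 4GB + 2MB */
	unsigned long info_block_reserve = 8192;  /* assumed info block bytes  */

	unsigned long base_pfn = base >> PAGE_SHIFT;
	/* altmap base: the namespace base rounded down to a sub-section */
	unsigned long altmap_base = SUBSECTION_ALIGN_DOWN(base_pfn);
	/* reserve: info-block pages plus any pfns between the sub-section
	 * boundary and the real namespace base */
	unsigned long reserve = (info_block_reserve >> PAGE_SHIFT)
			+ (base_pfn - SUBSECTION_ALIGN_DOWN(base_pfn));

	printf("altmap base pfn: %#lx\n", altmap_base); /* 0x100200               */
	printf("reserved pages:  %lu\n", reserve);      /* 2: base already aligned */
	return 0;
}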
@@ -632,8 +632,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
 			return -EINVAL;
 		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
 	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
-		nd_pfn->npfns = PFN_SECTION_ALIGN_UP((resource_size(res)
-					- offset) / PAGE_SIZE);
+		nd_pfn->npfns = PHYS_PFN((resource_size(res) - offset));
 		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
 			dev_info(&nd_pfn->dev,
 					"number of pfns truncated from %lld to %ld\n",
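For illustration, the npfns change above swaps a section-rounded page count for an exact one. The namespace and metadata sizes below are made up, and the section geometry assumes x86-64 defaults (128MB sections, 4KB pages):

/* Illustrative: old vs new pfn count for a namespace whose usable area
 * is not a whole number of 128MB sections. */
#include <stdio.h>

#define PAGE_SIZE         4096ULL
#define PAGES_PER_SECTION 32768ULL /* assumed: 128MB / 4KB */

static unsigned long long section_align_up(unsigned long long pfn)
{
	return (pfn + PAGES_PER_SECTION - 1) & ~(PAGES_PER_SECTION - 1);
}

int main(void)
{
	unsigned long long res_size = 132ULL << 20; /* 132MB namespace  */
	unsigned long long offset   = 2ULL << 20;   /* 2MB of metadata  */

	/* Old: round the page count up to a full section's worth of pages. */
	unsigned long long old_npfns = section_align_up((res_size - offset) / PAGE_SIZE);
	/* New: PHYS_PFN() of the remaining bytes, no rounding. */
	unsigned long long new_npfns = (res_size - offset) >> 12;

	printf("old npfns: %llu\n", old_npfns); /* 65536: two full sections */
	printf("new npfns: %llu\n", new_npfns); /* 33280: exactly 130MB     */
	return 0;
}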
@@ -649,54 +648,14 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
 	return 0;
 }
 
-static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys)
-{
-	return min_t(u64, PHYS_SECTION_ALIGN_DOWN(phys),
-			ALIGN_DOWN(phys, nd_pfn->align));
-}
-
-/*
- * Check if pmem collides with 'System RAM', or other regions when
- * section aligned.  Trim it accordingly.
- */
-static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trunc)
-{
-	struct nd_namespace_common *ndns = nd_pfn->ndns;
-	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
-	struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent);
-	const resource_size_t start = nsio->res.start;
-	const resource_size_t end = start + resource_size(&nsio->res);
-	resource_size_t adjust, size;
-
-	*start_pad = 0;
-	*end_trunc = 0;
-
-	adjust = start - PHYS_SECTION_ALIGN_DOWN(start);
-	size = resource_size(&nsio->res) + adjust;
-	if (region_intersects(start - adjust, size, IORESOURCE_SYSTEM_RAM,
-				IORES_DESC_NONE) == REGION_MIXED
-			|| nd_region_conflict(nd_region, start - adjust, size))
-		*start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
-
-	/* Now check that end of the range does not collide. */
-	adjust = PHYS_SECTION_ALIGN_UP(end) - end;
-	size = resource_size(&nsio->res) + adjust;
-	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
-				IORES_DESC_NONE) == REGION_MIXED
-			|| !IS_ALIGNED(end, nd_pfn->align)
-			|| nd_region_conflict(nd_region, start, size))
-		*end_trunc = end - phys_pmem_align_down(nd_pfn, end);
-}
-
 static int nd_pfn_init(struct nd_pfn *nd_pfn)
 {
 	struct nd_namespace_common *ndns = nd_pfn->ndns;
 	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
-	u32 start_pad, end_trunc, reserve = info_block_reserve();
 	resource_size_t start, size;
 	struct nd_region *nd_region;
+	unsigned long npfns, align;
 	struct nd_pfn_sb *pfn_sb;
-	unsigned long npfns;
 	phys_addr_t offset;
 	const char *sig;
 	u64 checksum;
@@ -727,43 +686,35 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 		return -ENXIO;
 	}
 
 	memset(pfn_sb, 0, sizeof(*pfn_sb));
 
-	trim_pfn_device(nd_pfn, &start_pad, &end_trunc);
-	if (start_pad + end_trunc)
-		dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n",
-				dev_name(&ndns->dev), start_pad + end_trunc);
-
 	/*
 	 * Note, we use 64 here for the standard size of struct page,
 	 * debugging options may cause it to be larger in which case the
 	 * implementation will limit the pfns advertised through
 	 * ->direct_access() to those that are included in the memmap.
 	 */
-	start = nsio->res.start + start_pad;
+	start = nsio->res.start;
 	size = resource_size(&nsio->res);
-	npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - reserve)
-			/ PAGE_SIZE);
+	npfns = PHYS_PFN(size - SZ_8K);
+	align = max(nd_pfn->align, (1UL << SUBSECTION_SHIFT));
 	if (nd_pfn->mode == PFN_MODE_PMEM) {
 		/*
 		 * The altmap should be padded out to the block size used
 		 * when populating the vmemmap. This *should* be equal to
 		 * PMD_SIZE for most architectures.
 		 */
-		offset = ALIGN(start + reserve + 64 * npfns,
-				max(nd_pfn->align, PMD_SIZE)) - start;
+		offset = ALIGN(start + SZ_8K + 64 * npfns, align) - start;
 	} else if (nd_pfn->mode == PFN_MODE_RAM)
-		offset = ALIGN(start + reserve, nd_pfn->align) - start;
+		offset = ALIGN(start + SZ_8K, align) - start;
 	else
 		return -ENXIO;
 
-	if (offset + start_pad + end_trunc >= size) {
+	if (offset >= size) {
 		dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
 				dev_name(&ndns->dev));
 		return -ENXIO;
 	}
 
-	npfns = (size - offset - start_pad - end_trunc) / SZ_4K;
+	npfns = PHYS_PFN(size - offset);
 	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
 	pfn_sb->dataoff = cpu_to_le64(offset);
 	pfn_sb->npfns = cpu_to_le64(npfns);
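To make the new offset/npfns arithmetic concrete, a small sketch of the PFN_MODE_PMEM case. The 16GB namespace, the 2MB alignment floor, and the 64-byte struct page estimate are assumptions for the example (the 64-byte figure comes from the comment above), and align_up() is a stand-in for the kernel's ALIGN():

/* Illustrative: how much of the namespace the on-device memmap consumes
 * under the new arithmetic (PFN_MODE_PMEM case, 4KB pages assumed). */
#include <stdio.h>

#define SZ_8K (8ULL << 10)
#define SZ_2M (2ULL << 20)

static unsigned long long align_up(unsigned long long x, unsigned long long a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	unsigned long long start = 0x200000000ULL; /* 8GB, already 2MB aligned */
	unsigned long long size  = 16ULL << 30;    /* assumed 16GB namespace   */
	unsigned long long align = SZ_2M;          /* max(nd_pfn->align, 2MB)  */

	/* First estimate of pfns: everything past the 8KB info block. */
	unsigned long long npfns = (size - SZ_8K) >> 12;
	/* Reserve one 64-byte struct page per pfn, rounded up to 'align'. */
	unsigned long long offset = align_up(start + SZ_8K + 64 * npfns, align) - start;
	/* The final pfn count only covers the space left after the memmap. */
	npfns = (size - offset) >> 12;

	printf("memmap offset: %llu MB\n", offset >> 20); /* 258 MB  */
	printf("data pfns:     %llu\n", npfns);           /* 4128256 */
	return 0;
}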
@@ -772,8 +723,6 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 	memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
 	pfn_sb->version_major = cpu_to_le16(1);
 	pfn_sb->version_minor = cpu_to_le16(3);
-	pfn_sb->start_pad = cpu_to_le32(start_pad);
-	pfn_sb->end_trunc = cpu_to_le32(end_trunc);
 	pfn_sb->align = cpu_to_le32(nd_pfn->align);
 	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
 	pfn_sb->checksum = cpu_to_le64(checksum);
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1160,6 +1160,9 @@ static inline unsigned long section_nr_to_pfn(unsigned long sec)
 #define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT))
 #endif
 
+#define SUBSECTION_ALIGN_UP(pfn) ALIGN((pfn), PAGES_PER_SUBSECTION)
+#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)
+
 struct mem_section_usage {
 	DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
 	/* See declaration of similar field in struct zone */
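A quick standalone check of the new SUBSECTION_ALIGN_UP()/SUBSECTION_ALIGN_DOWN() helpers. Only the two macro bodies mirror the patch; PAGES_PER_SUBSECTION and PAGE_SUBSECTION_MASK are filled in with the values they take on a 4KB-page kernel (SUBSECTION_SHIFT = 21, i.e. 2MB sub-sections), and the rest is scaffolding:

/* Standalone demo of the new sub-section alignment helpers. */
#include <stdio.h>

#define ALIGN(x, a)          (((x) + (a) - 1) & ~((a) - 1))
#define PAGES_PER_SUBSECTION (1UL << (21 - 12)) /* 512 pages, assumed geometry */
#define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION - 1))

/* Macro bodies as added by the patch */
#define SUBSECTION_ALIGN_UP(pfn)   ALIGN((pfn), PAGES_PER_SUBSECTION)
#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)

int main(void)
{
	unsigned long pfn = 0x100203; /* arbitrary, not sub-section aligned */

	printf("pfn:  %#lx\n", pfn);
	printf("down: %#lx\n", SUBSECTION_ALIGN_DOWN(pfn)); /* 0x100200 */
	printf("up:   %#lx\n", SUBSECTION_ALIGN_UP(pfn));   /* 0x100400 */
	return 0;
}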