commit 4eca0ef49a:

    Large amounts of memory managed by the kmem driver may come in via CXL,
    and it is often desirable to have the memmap for this memory on the new
    memory itself.

    Enroll kmem-managed memory for memmap_on_memory semantics if the dax
    region originates via CXL. For non-CXL dax regions, retain the existing
    default behavior of hot adding without memmap_on_memory semantics.

    Link: https://lkml.kernel.org/r/20231107-vv-kmem_memmap-v10-3-1253ec050ed0@intel.com
    Signed-off-by: Vishal Verma <vishal.l.verma@intel.com>
    Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
    Reviewed-by: David Hildenbrand <david@redhat.com>
    Reviewed-by: "Huang, Ying" <ying.huang@intel.com>
    Tested-by: Li Zhijian <lizhijian@fujitsu.com> [cxl.kmem and nvdimm.kmem]
    Cc: Michal Hocko <mhocko@suse.com>
    Cc: Oscar Salvador <osalvador@suse.de>
    Cc: Dan Williams <dan.j.williams@intel.com>
    Cc: Dave Jiang <dave.jiang@intel.com>
    Cc: Dave Hansen <dave.hansen@linux.intel.com>
    Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
    Cc: Fan Ni <fan.ni@samsung.com>
    Cc: Jeff Moyer <jmoyer@redhat.com>
    Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright(c) 2016 Intel Corporation. All rights reserved.
 */
#ifndef __DAX_PRIVATE_H__
#define __DAX_PRIVATE_H__

#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/idr.h>

/* private routines between core files */
struct dax_device;
struct dax_device *inode_dax(struct inode *inode);
struct inode *dax_inode(struct dax_device *dax_dev);
int dax_bus_init(void);
void dax_bus_exit(void);
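
/*
 * Example (sketch, assumed usage): dax_bus_init()/dax_bus_exit() pair up
 * in the dax core module init/exit path, in the style of
 * drivers/dax/super.c:
 *
 *	static int __init dax_core_init(void)
 *	{
 *		int rc = dax_bus_init();
 *
 *		if (rc)
 *			return rc;
 *		...
 *	}
 */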
/**
 * struct dax_region - mapping infrastructure for dax devices
 * @id: kernel-wide unique region for a memory range
 * @target_node: effective numa node if this memory range is onlined
 * @kref: to pin while other agents have a need to do lookups
 * @dev: parent device backing this region
 * @align: allocation and mapping alignment for child dax devices
 * @ida: instance id allocator
 * @res: resource tree to track instance allocations
 * @seed: allow userspace to find the first unbound seed device
 * @youngest: allow userspace to find the most recently created device
 */
struct dax_region {
	int id;
	int target_node;
	struct kref kref;
	struct device *dev;
	unsigned int align;
	struct ida ida;
	struct resource res;
	struct device *seed;
	struct device *youngest;
};
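
/*
 * Example (sketch, assumed usage): agents doing lookups pin the region
 * via @kref and drop the pin when done; the release callback named here
 * is hypothetical, the real free path lives in drivers/dax/bus.c:
 *
 *	kref_get(&dax_region->kref);
 *	... lookup child devices ...
 *	kref_put(&dax_region->kref, dax_region_free);
 */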

/**
 * struct dax_mapping - device representation of an allocated range
 * @dev: device core for sysfs enumeration / lifetime management
 * @range_id: index of the corresponding entry in the parent dev_dax @ranges
 * @id: ida allocated instance id
 */
struct dax_mapping {
	struct device dev;
	int range_id;
	int id;
};

/**
 * struct dev_dax - instance data for a subdivision of a dax region, and
 * data while the device is activated in the driver.
 * @region: parent region
 * @dax_dev: core dax functionality
 * @align: allocation and mapping alignment for this instance
 * @target_node: effective numa node if dev_dax memory range is onlined
 * @dyn_id: is this a dynamic or statically created instance
 * @id: ida allocated id when the dax_region is not static
 * @ida: mapping id allocator
 * @dev: device core
 * @pgmap: pgmap for memmap setup / lifetime (driver owned)
 * @memmap_on_memory: whether to hot add memory with the memmap on that memory
 * @nr_range: size of @ranges
 * @ranges: resource-span + pgoff tuples for the instance
 */
struct dev_dax {
	struct dax_region *region;
	struct dax_device *dax_dev;
	unsigned int align;
	int target_node;
	bool dyn_id;
	int id;
	struct ida ida;
	struct device dev;
	struct dev_pagemap *pgmap;
	bool memmap_on_memory;
	int nr_range;
	struct dev_dax_range {
		unsigned long pgoff;
		struct range range;
		struct dax_mapping *mapping;
	} *ranges;
};
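
/*
 * Illustrative helper (sketch, not part of the upstream header): compute
 * the total span of an instance by walking @ranges, in the style of
 * consumers in drivers/dax/. The helper name is hypothetical.
 */
static inline unsigned long long dev_dax_size_sketch(struct dev_dax *dev_dax)
{
	unsigned long long size = 0;
	int i;

	for (i = 0; i < dev_dax->nr_range; i++) {
		struct range *range = &dev_dax->ranges[i].range;

		/* struct range spans are inclusive: [start, end] */
		size += range->end - range->start + 1;
	}
	return size;
}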

/*
 * While run_dax() is potentially a generic operation that could be
 * defined in include/linux/dax.h, we don't want to grow any users
 * outside of drivers/dax/
 */
void run_dax(struct dax_device *dax_dev);

static inline struct dev_dax *to_dev_dax(struct device *dev)
{
	return container_of(dev, struct dev_dax, dev);
}

static inline struct dax_mapping *to_dax_mapping(struct device *dev)
{
	return container_of(dev, struct dax_mapping, dev);
}
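
/*
 * Example (sketch, assumed usage): the container_of() helpers above are
 * typically used from device / sysfs attribute callbacks, e.g.:
 *
 *	static ssize_t size_show(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		struct dev_dax *dev_dax = to_dev_dax(dev);
 *		...
 *	}
 */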

phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff, unsigned long size);
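
/*
 * Example usage (sketch): a fault handler resolving a page offset to a
 * physical address before inserting a mapping; the -1 error convention
 * is assumed from the callers in drivers/dax/device.c:
 *
 *	phys_addr_t phys = dax_pgoff_to_phys(dev_dax, pgoff, PAGE_SIZE);
 *	if (phys == -1)
 *		return VM_FAULT_SIGBUS;
 */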

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline bool dax_align_valid(unsigned long align)
{
	if (align == PUD_SIZE && IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
		return true;
	if (align == PMD_SIZE && has_transparent_hugepage())
		return true;
	if (align == PAGE_SIZE)
		return true;
	return false;
}
#else
static inline bool dax_align_valid(unsigned long align)
{
	return align == PAGE_SIZE;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
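
/*
 * Example (sketch, assumed usage): callers gate a requested alignment on
 * dax_align_valid() before accepting it, e.g. in a sysfs store path:
 *
 *	if (!dax_align_valid(new_align))
 *		return -EINVAL;
 */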
#endif /* __DAX_PRIVATE_H__ */