linux/drivers/cxl/acpi.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/node.h>
#include <asm/div64.h>
#include "cxlpci.h"
#include "cxl.h"
#define CXL_RCRB_SIZE SZ_8K
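/*
* Per-window copy of the CEDT CXIMS xormap list, stashed in the owning
* root decoder's platform_data and used by cxl_xor_hpa_to_spa().
*/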
struct cxl_cxims_data {
int nr_maps;
u64 xormaps[] __counted_by(nr_maps);
};
static const guid_t acpi_cxl_qtg_id_guid =
GUID_INIT(0xF365F9A6, 0xA7DE, 0x4071,
0xA6, 0x6A, 0xB4, 0x0C, 0x0B, 0x4F, 0x8E, 0x52);
static u64 cxl_xor_hpa_to_spa(struct cxl_root_decoder *cxlrd, u64 hpa)
{
struct cxl_cxims_data *cximsd = cxlrd->platform_data;
int hbiw = cxlrd->cxlsd.nr_targets;
u64 val;
int pos;
/* No xormaps for host bridge interleave ways of 1 or 3 */
if (hbiw == 1 || hbiw == 3)
return hpa;
/*
* For root decoders using xormaps (hbiw: 2,4,6,8,12,16) restore
* the position bit to its value before the xormap was applied at
* HPA->DPA translation.
*
* pos is the lowest set bit in an XORMAP
* val is the XORALLBITS(HPA & XORMAP)
*
* XORALLBITS: The CXL spec (3.1 Table 9-22) defines XORALLBITS
* as an operation that outputs a single bit by XORing all the
* bits in the input (hpa & xormap). Implement XORALLBITS using
* hweight64(). If the hamming weight is even the XOR of those
* bits results in val==0, if odd the XOR result is val==1.
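*
* Example: an xormap of 0x404 gives pos = 2 and val = the parity of
* HPA bits 2 and 10; bit 2 of the HPA is then replaced with val.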
*/
for (int i = 0; i < cximsd->nr_maps; i++) {
if (!cximsd->xormaps[i])
continue;
pos = __ffs(cximsd->xormaps[i]);
val = (hweight64(hpa & cximsd->xormaps[i]) & 1);
hpa = (hpa & ~(1ULL << pos)) | (val << pos);
}
return hpa;
}
struct cxl_cxims_context {
struct device *dev;
struct cxl_root_decoder *cxlrd;
};
static int cxl_parse_cxims(union acpi_subtable_headers *header, void *arg,
const unsigned long end)
{
struct acpi_cedt_cxims *cxims = (struct acpi_cedt_cxims *)header;
struct cxl_cxims_context *ctx = arg;
struct cxl_root_decoder *cxlrd = ctx->cxlrd;
struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
struct device *dev = ctx->dev;
struct cxl_cxims_data *cximsd;
unsigned int hbig, nr_maps;
int rc;
rc = eig_to_granularity(cxims->hbig, &hbig);
if (rc)
return rc;
/* Does this CXIMS entry apply to the given CXL Window? */
if (hbig != cxld->interleave_granularity)
return 0;
/* IW 1,3 do not use xormaps and skip this parsing entirely */
if (is_power_of_2(cxld->interleave_ways))
/* 2, 4, 8, 16 way */
nr_maps = ilog2(cxld->interleave_ways);
else
/* 6, 12 way */
nr_maps = ilog2(cxld->interleave_ways / 3);
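/*
* Only the power-of-2 factor of the interleave consumes xormaps; the
* 3-way factor of 6 and 12 way windows is decoded by modulo math.
*/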
if (cxims->nr_xormaps < nr_maps) {
dev_dbg(dev, "CXIMS nr_xormaps[%d] expected[%d]\n",
cxims->nr_xormaps, nr_maps);
return -ENXIO;
}
cximsd = devm_kzalloc(dev, struct_size(cximsd, xormaps, nr_maps),
GFP_KERNEL);
if (!cximsd)
return -ENOMEM;
cximsd->nr_maps = nr_maps;
memcpy(cximsd->xormaps, cxims->xormap_list,
nr_maps * sizeof(*cximsd->xormaps));
cxlrd->platform_data = cximsd;
return 0;
}
static unsigned long cfmws_to_decoder_flags(int restrictions)
{
unsigned long flags = CXL_DECODER_F_ENABLE;
if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE2)
flags |= CXL_DECODER_F_TYPE2;
if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE3)
flags |= CXL_DECODER_F_TYPE3;
if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_VOLATILE)
flags |= CXL_DECODER_F_RAM;
if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_PMEM)
flags |= CXL_DECODER_F_PMEM;
if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_FIXED)
flags |= CXL_DECODER_F_LOCK;
return flags;
}
static int cxl_acpi_cfmws_verify(struct device *dev,
struct acpi_cedt_cfmws *cfmws)
{
int rc, expected_len;
unsigned int ways;
if (cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_MODULO &&
cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_XOR) {
dev_err(dev, "CFMWS Unknown Interleave Arithmetic: %d\n",
cfmws->interleave_arithmetic);
return -EINVAL;
}
if (!IS_ALIGNED(cfmws->base_hpa, SZ_256M)) {
dev_err(dev, "CFMWS Base HPA not 256MB aligned\n");
return -EINVAL;
}
if (!IS_ALIGNED(cfmws->window_size, SZ_256M)) {
dev_err(dev, "CFMWS Window Size not 256MB aligned\n");
return -EINVAL;
}
rc = eiw_to_ways(cfmws->interleave_ways, &ways);
if (rc) {
dev_err(dev, "CFMWS Interleave Ways (%d) invalid\n",
cfmws->interleave_ways);
return -EINVAL;
}
expected_len = struct_size(cfmws, interleave_targets, ways);
if (cfmws->header.length < expected_len) {
dev_err(dev, "CFMWS length %d less than expected %d\n",
cfmws->header.length, expected_len);
return -EINVAL;
}
if (cfmws->header.length > expected_len)
dev_dbg(dev, "CFMWS length %d greater than expected %d\n",
cfmws->header.length, expected_len);
return 0;
}
/*
* Note, @dev must be the first member, see 'struct cxl_chbs_context'
* and mock_acpi_table_parse_cedt()
*/
struct cxl_cfmws_context {
struct device *dev;
struct cxl_port *root_port;
struct resource *cxl_res;
int id;
};
/**
* cxl_acpi_evaluate_qtg_dsm - Retrieve QTG ids via ACPI _DSM
* @handle: ACPI handle
* @coord: performance access coordinates
* @entries: number of QTG IDs to return
* @qos_class: int array provided by caller to return QTG IDs
*
* Return: number of QTG IDs returned, or -errno for errors
*
* Issue QTG _DSM with accompanied bandwidth and latency data in order to get
* the QTG IDs that are suitable for the performance point in order of most
* suitable to least suitable. Write back array of QTG IDs and return the
* actual number of QTG IDs written back.
*/
static int
cxl_acpi_evaluate_qtg_dsm(acpi_handle handle, struct access_coordinate *coord,
int entries, int *qos_class)
{
union acpi_object *out_obj, *out_buf, *obj;
union acpi_object in_array[4] = {
[0].integer = { ACPI_TYPE_INTEGER, coord->read_latency },
[1].integer = { ACPI_TYPE_INTEGER, coord->write_latency },
[2].integer = { ACPI_TYPE_INTEGER, coord->read_bandwidth },
[3].integer = { ACPI_TYPE_INTEGER, coord->write_bandwidth },
};
union acpi_object in_obj = {
.package = {
.type = ACPI_TYPE_PACKAGE,
.count = 4,
.elements = in_array,
},
};
int count, pkg_entries, i;
u16 max_qtg;
int rc;
if (!entries)
return -EINVAL;
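/*
* Evaluate the QTG _DSM (revision 1, function index 1) with the
* latency/bandwidth coordinates packaged above as the argument.
*/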
out_obj = acpi_evaluate_dsm(handle, &acpi_cxl_qtg_id_guid, 1, 1, &in_obj);
if (!out_obj)
return -ENXIO;
if (out_obj->type != ACPI_TYPE_PACKAGE) {
rc = -ENXIO;
goto out;
}
/* Check Max QTG ID */
obj = &out_obj->package.elements[0];
if (obj->type != ACPI_TYPE_INTEGER) {
rc = -ENXIO;
goto out;
}
max_qtg = obj->integer.value;
/* It's legal to have 0 QTG entries */
pkg_entries = out_obj->package.count;
if (pkg_entries <= 1) {
rc = 0;
goto out;
}
/* Retrieve QTG IDs package */
obj = &out_obj->package.elements[1];
if (obj->type != ACPI_TYPE_PACKAGE) {
rc = -ENXIO;
goto out;
}
pkg_entries = obj->package.count;
count = min(entries, pkg_entries);
for (i = 0; i < count; i++) {
u16 qtg_id;
out_buf = &obj->package.elements[i];
if (out_buf->type != ACPI_TYPE_INTEGER) {
rc = -ENXIO;
goto out;
}
qtg_id = out_buf->integer.value;
if (qtg_id > max_qtg)
pr_warn("QTG ID %u greater than MAX %u\n",
qtg_id, max_qtg);
qos_class[i] = qtg_id;
}
rc = count;
out:
ACPI_FREE(out_obj);
return rc;
}
static int cxl_acpi_qos_class(struct cxl_root *cxl_root,
struct access_coordinate *coord, int entries,
int *qos_class)
{
struct device *dev = cxl_root->port.uport_dev;
acpi_handle handle;
if (!dev_is_platform(dev))
return -ENODEV;
handle = ACPI_HANDLE(dev);
if (!handle)
return -ENODEV;
return cxl_acpi_evaluate_qtg_dsm(handle, coord, entries, qos_class);
}
static const struct cxl_root_ops acpi_root_ops = {
.qos_class = cxl_acpi_qos_class,
};
static void del_cxl_resource(struct resource *res)
{
if (!res)
return;
kfree(res->name);
kfree(res);
}
static struct resource *alloc_cxl_resource(resource_size_t base,
resource_size_t n, int id)
{
struct resource *res __free(kfree) = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res)
return NULL;
res->start = base;
res->end = base + n - 1;
res->flags = IORESOURCE_MEM;
res->name = kasprintf(GFP_KERNEL, "CXL Window %d", id);
if (!res->name)
return NULL;
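/* Success: hand ownership to the caller and disarm the __free(kfree) cleanup */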
return no_free_ptr(res);
}
static int add_or_reset_cxl_resource(struct resource *parent, struct resource *res)
{
int rc = insert_resource(parent, res);
if (rc)
del_cxl_resource(res);
return rc;
}
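/*
* Scope-based cleanup helpers: drop the root decoder reference or free
* the window resource when a __free() annotated pointer goes out of
* scope without being handed off via no_free_ptr().
*/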
DEFINE_FREE(put_cxlrd, struct cxl_root_decoder *,
if (!IS_ERR_OR_NULL(_T)) put_device(&_T->cxlsd.cxld.dev))
DEFINE_FREE(del_cxl_resource, struct resource *, if (_T) del_cxl_resource(_T))
static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
struct cxl_cfmws_context *ctx)
{
int target_map[CXL_DECODER_MAX_INTERLEAVE];
struct cxl_port *root_port = ctx->root_port;
struct cxl_cxims_context cxims_ctx;
struct device *dev = ctx->dev;
struct cxl_decoder *cxld;
unsigned int ways, i, ig;
int rc;
rc = cxl_acpi_cfmws_verify(dev, cfmws);
if (rc)
return rc;
rc = eiw_to_ways(cfmws->interleave_ways, &ways);
if (rc)
return rc;
rc = eig_to_granularity(cfmws->granularity, &ig);
if (rc)
return rc;
for (i = 0; i < ways; i++)
target_map[i] = cfmws->interleave_targets[i];
struct resource *res __free(del_cxl_resource) = alloc_cxl_resource(
cfmws->base_hpa, cfmws->window_size, ctx->id++);
if (!res)
return -ENOMEM;
/* add to the local resource tracking to establish a sort order */
rc = add_or_reset_cxl_resource(ctx->cxl_res, no_free_ptr(res));
if (rc)
return rc;
struct cxl_root_decoder *cxlrd __free(put_cxlrd) =
cxl_root_decoder_alloc(root_port, ways);
if (IS_ERR(cxlrd))
return PTR_ERR(cxlrd);
cxld = &cxlrd->cxlsd.cxld;
cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
cxld->target_type = CXL_DECODER_HOSTONLYMEM;
cxld->hpa_range = (struct range) {
.start = cfmws->base_hpa,
.end = cfmws->base_hpa + cfmws->window_size - 1,
};
cxld->interleave_ways = ways;
/*
* Minimize the x1 granularity to advertise support for any
* valid region granularity
*/
if (ways == 1)
ig = CXL_DECODER_MIN_GRANULARITY;
cxld->interleave_granularity = ig;
if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR) {
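/* Interleave ways of 1 and 3 use no xormaps, so no CXIMS lookup is needed */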
if (ways != 1 && ways != 3) {
cxims_ctx = (struct cxl_cxims_context) {
.dev = dev,
.cxlrd = cxlrd,
};
rc = acpi_table_parse_cedt(ACPI_CEDT_TYPE_CXIMS,
cxl_parse_cxims, &cxims_ctx);
if (rc < 0)
return rc;
if (!cxlrd->platform_data) {
dev_err(dev, "No CXIMS for HBIG %u\n", ig);
return -EINVAL;
}
}
}
cxlrd->qos_class = cfmws->qtg_id;
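/*
* XOR interleave needs position bits restored when translating a
* device-reported address back to an SPA, see cxl_xor_hpa_to_spa().
*/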
if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR)
cxlrd->hpa_to_spa = cxl_xor_hpa_to_spa;
rc = cxl_decoder_add(cxld, target_map);
if (rc)
return rc;
return cxl_root_decoder_autoremove(dev, no_free_ptr(cxlrd));
}
static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
const unsigned long end)
{
struct acpi_cedt_cfmws *cfmws = (struct acpi_cedt_cfmws *)header;
struct cxl_cfmws_context *ctx = arg;
struct device *dev = ctx->dev;
int rc;
rc = __cxl_parse_cfmws(cfmws, ctx);
if (rc)
dev_err(dev,
"Failed to add decode range: [%#llx - %#llx] (%d)\n",
cfmws->base_hpa,
cfmws->base_hpa + cfmws->window_size - 1, rc);
else
dev_dbg(dev, "decode range: node: %d range [%#llx - %#llx]\n",
phys_to_target_node(cfmws->base_hpa), cfmws->base_hpa,
cfmws->base_hpa + cfmws->window_size - 1);
/* never fail cxl_acpi load for a single window failure */
return 0;
}
__mock struct acpi_device *to_cxl_host_bridge(struct device *host,
struct device *dev)
{
struct acpi_device *adev = to_acpi_device(dev);
if (!acpi_pci_find_root(adev->handle))
return NULL;
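/* "ACPI0016" is the _HID advertised by CXL host bridges */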
if (strcmp(acpi_device_hid(adev), "ACPI0016") == 0)
return adev;
return NULL;
}
/* Note, @dev is used by mock_acpi_table_parse_cedt() */
struct cxl_chbs_context {
struct device *dev;
unsigned long long uid;
resource_size_t base;
u32 cxl_version;
int nr_versions;
u32 saved_version;
};
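/*
 * acpi_table_parse_cedt() callback: skip malformed or already-matched CHBS
 * entries, count distinct CHBS versions, and record the base address and
 * version of the entry whose UID matches ctx->uid.
 */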
static int cxl_get_chbs_iter(union acpi_subtable_headers *header, void *arg,
const unsigned long end)
{
struct cxl_chbs_context *ctx = arg;
struct acpi_cedt_chbs *chbs;
chbs = (struct acpi_cedt_chbs *) header;
if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11 &&
chbs->length != CXL_RCRB_SIZE)
return 0;
if (!chbs->base)
return 0;
if (ctx->saved_version != chbs->cxl_version) {
/*
 * ctx->cxl_version must not be overwritten before the base and uid
 * checks below, so track distinct CHBS versions via saved_version
 */
ctx->saved_version = chbs->cxl_version;
ctx->nr_versions++;
}
if (ctx->base != CXL_RESOURCE_NONE)
return 0;
if (ctx->uid != chbs->uid)
return 0;
ctx->cxl_version = chbs->cxl_version;
ctx->base = chbs->base;
return 0;
}
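/*
 * Look up the CHBS entry for @hb: resolve the bridge's ACPI _UID, then walk
 * the CEDT to fill @ctx with the matching base address and CXL version,
 * warning if VH and RCH CHBS versions are mixed.
 */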
static int cxl_get_chbs(struct device *dev, struct acpi_device *hb,
struct cxl_chbs_context *ctx)
{
unsigned long long uid;
int rc;
rc = acpi_evaluate_integer(hb->handle, METHOD_NAME__UID, NULL, &uid);
if (rc != AE_OK) {
dev_err(dev, "unable to retrieve _UID\n");
return -ENOENT;
}
dev_dbg(dev, "UID found: %lld\n", uid);
*ctx = (struct cxl_chbs_context) {
.dev = dev,
.uid = uid,
.base = CXL_RESOURCE_NONE,
.cxl_version = UINT_MAX,
.saved_version = UINT_MAX,
};
acpi_table_parse_cedt(ACPI_CEDT_TYPE_CHBS, cxl_get_chbs_iter, ctx);
if (ctx->nr_versions > 1) {
/*
* Disclaim eRCD support given some component registers may
* only be found via the CHBCR
*/
dev_info(dev, "Unsupported platform config, mixed Virtual Host and Restricted CXL Host hierarchy.");
}
return 0;
}
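/*
 * Fetch the ACPI generic port performance coordinates for this host bridge,
 * keyed by its _UID, and store them in the dport.
 */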
static int get_genport_coordinates(struct device *dev, struct cxl_dport *dport)
{
struct acpi_device *hb = to_cxl_host_bridge(NULL, dev);
u32 uid;
if (kstrtou32(acpi_device_uid(hb), 0, &uid))
return -EINVAL;
return acpi_get_genport_coordinates(uid, dport->coord);
}
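/*
 * Register an ACPI0016 host bridge as a dport of the CXL root port. A
 * CXL 1.1 (RCH) bridge publishes an RCRB, so its component registers are
 * bound to the dport here; a VH bridge defers component register binding
 * to add_host_bridge_uport().
 */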
static int add_host_bridge_dport(struct device *match, void *arg)
{
int ret;
acpi_status rc;
struct device *bridge;
struct cxl_dport *dport;
struct cxl_chbs_context ctx;
struct acpi_pci_root *pci_root;
struct cxl_port *root_port = arg;
struct device *host = root_port->dev.parent;
struct acpi_device *hb = to_cxl_host_bridge(host, match);
if (!hb)
return 0;
rc = cxl_get_chbs(match, hb, &ctx);
if (rc)
return rc;
if (ctx.cxl_version == UINT_MAX) {
dev_warn(match, "No CHBS found for Host Bridge (UID %lld)\n",
ctx.uid);
return 0;
}
if (ctx.base == CXL_RESOURCE_NONE) {
dev_warn(match, "CHBS invalid for Host Bridge (UID %lld)\n",
ctx.uid);
return 0;
}
pci_root = acpi_pci_find_root(hb->handle);
bridge = pci_root->bus->bridge;
/*
* In RCH mode, bind the component regs base to the dport. In
* VH mode it will be bound to the CXL host bridge's port
* object later in add_host_bridge_uport().
*/
if (ctx.cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11) {
dev_dbg(match, "RCRB found for UID %lld: %pa\n", ctx.uid,
&ctx.base);
dport = devm_cxl_add_rch_dport(root_port, bridge, ctx.uid,
ctx.base);
} else {
dport = devm_cxl_add_dport(root_port, bridge, ctx.uid,
CXL_RESOURCE_NONE);
}
if (IS_ERR(dport))
return PTR_ERR(dport);
ret = get_genport_coordinates(match, dport);
if (ret)
dev_dbg(match, "Failed to get generic port perf coordinates.\n");
return 0;
}
/*
* A host bridge is a dport to a CFMWS decode and it is a uport to the
* dport (PCIe Root Ports) in the host bridge.
*/
static int add_host_bridge_uport(struct device *match, void *arg)
{
struct cxl_port *root_port = arg;
struct device *host = root_port->dev.parent;
struct acpi_device *hb = to_cxl_host_bridge(host, match);
struct acpi_pci_root *pci_root;
struct cxl_dport *dport;
struct cxl_port *port;
struct device *bridge;
struct cxl_chbs_context ctx;
resource_size_t component_reg_phys;
int rc;
if (!hb)
return 0;
pci_root = acpi_pci_find_root(hb->handle);
bridge = pci_root->bus->bridge;
dport = cxl_find_dport_by_dev(root_port, bridge);
if (!dport) {
dev_dbg(host, "host bridge expected and not found\n");
return 0;
}
if (dport->rch) {
dev_info(bridge, "host supports CXL (restricted)\n");
return 0;
}
rc = cxl_get_chbs(match, hb, &ctx);
if (rc)
return rc;
if (ctx.cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11) {
dev_warn(bridge,
"CXL CHBS version mismatch, skip port registration\n");
return 0;
}
component_reg_phys = ctx.base;
if (component_reg_phys != CXL_RESOURCE_NONE)
dev_dbg(match, "CHBCR found for UID %lld: %pa\n",
ctx.uid, &component_reg_phys);
rc = devm_cxl_register_pci_bus(host, bridge, pci_root->bus);
if (rc)
return rc;
port = devm_cxl_add_port(host, bridge, component_reg_phys, dport);
if (IS_ERR(port))
return PTR_ERR(port);
dev_info(bridge, "host supports CXL\n");
return 0;
}
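/*
 * Register an nvdimm-bridge for the root port if any root decoder
 * advertises persistent memory (CXL_DECODER_F_PMEM) capability.
 */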
static int add_root_nvdimm_bridge(struct device *match, void *data)
{
struct cxl_decoder *cxld;
struct cxl_port *root_port = data;
struct cxl_nvdimm_bridge *cxl_nvb;
struct device *host = root_port->dev.parent;
if (!is_root_decoder(match))
return 0;
cxld = to_cxl_decoder(match);
if (!(cxld->flags & CXL_DECODER_F_PMEM))
return 0;
cxl_nvb = devm_cxl_add_nvdimm_bridge(host, root_port);
if (IS_ERR(cxl_nvb)) {
dev_dbg(host, "failed to register pmem\n");
return PTR_ERR(cxl_nvb);
}
dev_dbg(host, "%s: add: %s\n", dev_name(&root_port->dev),
dev_name(&cxl_nvb->dev));
return 1;
}
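/*
 * The lock class of the ACPI-provided root device is established well
 * before cxl_acpi binds, so probe assigns a dedicated lock class for
 * lockdep and resets it via cxl_acpi_lock_reset_class() on teardown.
 */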
static struct lock_class_key cxl_root_key;
static void cxl_acpi_lock_reset_class(void *dev)
{
device_lock_reset_class(dev);
}
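/*
 * The private cxl_res tree repurposes the ->desc field to stash a pointer
 * to the public copy inserted into iomem_resource, so the pair can be torn
 * down together later.
 */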
static void cxl_set_public_resource(struct resource *priv, struct resource *pub)
{
priv->desc = (unsigned long) pub;
}
static struct resource *cxl_get_public_resource(struct resource *priv)
{
return (struct resource *) priv->desc;
}
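/*
 * Tear down the CXL window resources: remove each entry from the private
 * @cxl_res tree and, where one was recorded, its public counterpart from
 * iomem_resource.
 */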
static void remove_cxl_resources(void *data)
{
struct resource *res, *next, *cxl = data;
for (res = cxl->child; res; res = next) {
struct resource *victim = cxl_get_public_resource(res);
next = res->sibling;
remove_resource(res);
if (victim) {
remove_resource(victim);
kfree(victim);
}
del_cxl_resource(res);
}
}
/**
* add_cxl_resources() - reflect CXL fixed memory windows in iomem_resource
* @cxl_res: A standalone resource tree where each CXL window is a sibling
*
* Walk each CXL window in @cxl_res and add it to iomem_resource potentially
* expanding its boundaries to ensure that any conflicting resources become
* children. If a window is expanded it may then conflict with another window
* entry and require the window to be truncated or trimmed. Consider this
* situation:
*
* |-- "CXL Window 0" --||----- "CXL Window 1" -----|
* |--------------- "System RAM" -------------|
*
* ...where platform firmware has established a System RAM resource across 2
* windows, but has left some portion of window 1 for dynamic CXL region
* provisioning. In this case "Window 0" will span the entirety of the "System
* RAM" span, and "CXL Window 1" is truncated to the remaining tail past the end
* of that "System RAM" resource.
*/
static int add_cxl_resources(struct resource *cxl_res)
{
struct resource *res, *new, *next;
for (res = cxl_res->child; res; res = next) {
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new)
return -ENOMEM;
new->name = res->name;
new->start = res->start;
new->end = res->end;
new->flags = IORESOURCE_MEM;
new->desc = IORES_DESC_CXL;
/*
* Record the public resource in the private cxl_res tree for
* later removal.
*/
cxl_set_public_resource(res, new);
insert_resource_expand_to_fit(&iomem_resource, new);
next = res->sibling;
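/*
 * The expansion above may have swallowed or partially overlapped later
 * sibling windows; drop fully contained ones and trim the start of
 * partial overlaps.
 */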
while (next && resource_overlaps(new, next)) {
if (resource_contains(new, next)) {
struct resource *_next = next->sibling;
remove_resource(next);
del_cxl_resource(next);
next = _next;
} else
next->start = new->end + 1;
}
}
return 0;
}
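/*
 * For each root decoder, find the CXL window in @cxl_res that contains its
 * HPA range and cache the paired public iomem_resource entry in cxlrd->res.
 */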
static int pair_cxl_resource(struct device *dev, void *data)
{
struct resource *cxl_res = data;
struct resource *p;
if (!is_root_decoder(dev))
return 0;
for (p = cxl_res->child; p; p = p->sibling) {
struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
struct resource res = {
.start = cxld->hpa_range.start,
.end = cxld->hpa_range.end,
.flags = IORESOURCE_MEM,
};
if (resource_contains(p, &res)) {
cxlrd->res = cxl_get_public_resource(p);
break;
}
}
return 0;
}
static int cxl_acpi_probe(struct platform_device *pdev)
{
int rc;
	struct resource *cxl_res;
	struct cxl_root *cxl_root;
	struct cxl_port *root_port;
	struct device *host = &pdev->dev;
	struct acpi_device *adev = ACPI_COMPANION(host);
	struct cxl_cfmws_context ctx;
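
	/*
	 * Give the root platform device its own lockdep class so the CXL
	 * port hierarchy can be lock-validated; a devm action restores the
	 * default class at unbind.
	 */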
	device_lock_set_class(&pdev->dev, &cxl_root_key);
	rc = devm_add_action_or_reset(&pdev->dev, cxl_acpi_lock_reset_class,
				      &pdev->dev);
	if (rc)
		return rc;

	cxl_res = devm_kzalloc(host, sizeof(*cxl_res), GFP_KERNEL);
	if (!cxl_res)
		return -ENOMEM;
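
	/*
	 * Seed a private resource tree spanning the whole physical address
	 * space; CFMWS windows parsed below are added as children and then
	 * published to iomem_resource by add_cxl_resources().
	 */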
	cxl_res->name = "CXL mem";
	cxl_res->start = 0;
	cxl_res->end = -1;
	cxl_res->flags = IORESOURCE_MEM;

	cxl_root = devm_cxl_add_root(host, &acpi_root_ops);
	if (IS_ERR(cxl_root))
		return PTR_ERR(cxl_root);
	root_port = &cxl_root->port;
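
	/*
	 * First pass over the ACPI bus: register each CXL host bridge as a
	 * dport of the root port.
	 */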
	rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
			      add_host_bridge_dport);
	if (rc < 0)
		return rc;

	rc = devm_add_action_or_reset(host, remove_cxl_resources, cxl_res);
	if (rc)
		return rc;

	ctx = (struct cxl_cfmws_context) {
		.dev = host,
		.root_port = root_port,
		.cxl_res = cxl_res,
	};
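
	/*
	 * Walk the CEDT.CFMWS entries: each window becomes a root decoder
	 * under @root_port and a child resource of @cxl_res.
	 */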
	rc = acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, cxl_parse_cfmws, &ctx);
	if (rc < 0)
		return -ENXIO;

	rc = add_cxl_resources(cxl_res);
	if (rc)
		return rc;

	/*
	 * Populate the root decoders with their related iomem resource,
	 * if present
	 */
	device_for_each_child(&root_port->dev, cxl_res, pair_cxl_resource);

	/*
	 * Root level scanned with host-bridge as dports, now scan host-bridges
	 * for their role as CXL uports to their CXL-capable PCIe Root Ports.
	 */
	rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
			      add_host_bridge_uport);
	if (rc < 0)
		return rc;
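
	/* Add the nvdimm-bridge needed for CXL persistent memory support */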
	if (IS_ENABLED(CONFIG_CXL_PMEM))
		rc = device_for_each_child(&root_port->dev, root_port,
					   add_root_nvdimm_bridge);
	if (rc < 0)
		return rc;

	/* In case PCI is scanned before ACPI re-trigger memdev attach */
	cxl_bus_rescan();

	return 0;
}

static const struct acpi_device_id cxl_acpi_ids[] = {
	{ "ACPI0017" },
	{ },
};
MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids);
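
/*
 * In addition to ACPI0017, match the "cxl_acpi" platform device that the
 * cxl_test unit-test topology creates (see tools/testing/cxl).
 */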
static const struct platform_device_id cxl_test_ids[] = {
	{ "cxl_acpi" },
	{ },
};
MODULE_DEVICE_TABLE(platform, cxl_test_ids);

static struct platform_driver cxl_acpi_driver = {
	.probe = cxl_acpi_probe,
	.driver = {
		.name = KBUILD_MODNAME,
		.acpi_match_table = cxl_acpi_ids,
	},
	.id_table = cxl_test_ids,
};

static int __init cxl_acpi_init(void)
{
	return platform_driver_register(&cxl_acpi_driver);
}

static void __exit cxl_acpi_exit(void)
{
	platform_driver_unregister(&cxl_acpi_driver);
	cxl_bus_drain();
}

/* load before dax_hmem sees 'Soft Reserved' CXL ranges */
subsys_initcall(cxl_acpi_init);

/*
 * Arrange for host-bridge ports to be active synchronous with
 * cxl_acpi_probe() exit.
 */
MODULE_SOFTDEP("pre: cxl_port");
module_exit(cxl_acpi_exit);
MODULE_DESCRIPTION("CXL ACPI: Platform Support");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
MODULE_IMPORT_NS(ACPI);