Merge tag 'cxl-for-5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl

Pull cxl updates from Dan Williams:
 "More preparation and plumbing work in the CXL subsystem.

  From an end user perspective the highlight here is lighting up the CXL
  Persistent Memory related commands (label read / write) with the
  generic ioctl() front-end in LIBNVDIMM.

  Otherwise, the ability to instantiate new persistent and volatile
  memory regions is still on track for v5.17.

  Summary:

   - Fix support for platforms that do not enumerate every ACPI0016 (CXL
     Host Bridge) in the CHBS (CXL Host Bridge Structure).

   - Introduce a common pci_find_dvsec_capability() helper and clean up
     open-coded implementations in various drivers (a usage sketch follows
     this summary).

   - Add 'cxl_test' for regression testing CXL subsystem ABIs.
     'cxl_test' is a module built from tools/testing/cxl/ that mocks up
     a CXL topology to augment the nascent support for emulation of CXL
     devices in QEMU.

   - Convert libnvdimm to use the uuid API.

   - Complete the definition of CXL namespace labels in libnvdimm.

   - Tunnel libnvdimm label operations from nd_ioctl() back to the CXL
     mailbox driver. Enable 'ndctl {read,write}-labels' for CXL.

   - Continue to sort and refactor functionality into distinct driver
     and core-infrastructure buckets. For example, mailbox handling is
     now a generic core capability consumed by the PCI and cxl_test
     drivers"

* tag 'cxl-for-5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl: (34 commits)
  ocxl: Use pci core's DVSEC functionality
  cxl/pci: Use pci core's DVSEC functionality
  PCI: Add pci_find_dvsec_capability to find designated VSEC
  cxl/pci: Split cxl_pci_setup_regs()
  cxl/pci: Add @base to cxl_register_map
  cxl/pci: Make more use of cxl_register_map
  cxl/pci: Remove pci request/release regions
  cxl/pci: Fix NULL vs ERR_PTR confusion
  cxl/pci: Remove dev_dbg for unknown register blocks
  cxl/pci: Convert register block identifiers to an enum
  cxl/acpi: Do not fail cxl_acpi_probe() based on a missing CHBS
  cxl/pci: Disambiguate cxl_pci further from cxl_mem
  Documentation/cxl: Add bus internal docs
  cxl/core: Split decoder setup into alloc + add
  tools/testing/cxl: Introduce a mock memory device + driver
  cxl/mbox: Move command definitions to common location
  cxl/bus: Populate the target list at decoder create
  tools/testing/cxl: Introduce a mocked-up CXL port hierarchy
  cxl/pmem: Add support for multiple nvdimm-bridge objects
  cxl/pmem: Translate NVDIMM label commands to CXL label commands
  ...
Committed by Linus Torvalds on 2021-11-08 11:49:48 -08:00
36 changed files with 3275 additions and 1496 deletions

tools/testing/cxl/Kbuild

@@ -0,0 +1,38 @@
# SPDX-License-Identifier: GPL-2.0
ldflags-y += --wrap=is_acpi_device_node
ldflags-y += --wrap=acpi_get_table
ldflags-y += --wrap=acpi_put_table
ldflags-y += --wrap=acpi_evaluate_integer
ldflags-y += --wrap=acpi_pci_find_root
ldflags-y += --wrap=pci_walk_bus
ldflags-y += --wrap=nvdimm_bus_register
DRIVERS := ../../../drivers
CXL_SRC := $(DRIVERS)/cxl
CXL_CORE_SRC := $(DRIVERS)/cxl/core
ccflags-y := -I$(srctree)/drivers/cxl/
ccflags-y += -D__mock=__weak
obj-m += cxl_acpi.o
cxl_acpi-y := $(CXL_SRC)/acpi.o
cxl_acpi-y += mock_acpi.o
cxl_acpi-y += config_check.o
obj-m += cxl_pmem.o
cxl_pmem-y := $(CXL_SRC)/pmem.o
cxl_pmem-y += config_check.o
obj-m += cxl_core.o
cxl_core-y := $(CXL_CORE_SRC)/bus.o
cxl_core-y += $(CXL_CORE_SRC)/pmem.o
cxl_core-y += $(CXL_CORE_SRC)/regs.o
cxl_core-y += $(CXL_CORE_SRC)/memdev.o
cxl_core-y += $(CXL_CORE_SRC)/mbox.o
cxl_core-y += config_check.o
cxl_core-y += mock_pmem.o
obj-m += test/
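
The --wrap= flags above are what let cxl_test interpose on the ACPI, PCI, and
LIBNVDIMM symbols called by the cxl_acpi, cxl_pmem, and cxl_core objects
relinked here: for each listed symbol the linker redirects calls to
__wrap_<symbol> (implemented in the cxl_mock module below), while the
original remains reachable as __real_<symbol>. A minimal sketch of the
mechanism, using a made-up symbol name:

/* hypothetical symbol, built with: ldflags-y += --wrap=frob_widget */
int __real_frob_widget(int arg);	/* the linker points this at the original */

int __wrap_frob_widget(int arg)		/* callers of frob_widget() land here */
{
	if (arg < 0)			/* intercept the case under test */
		return -EINVAL;

	return __real_frob_widget(arg);	/* otherwise defer to the real symbol */
}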

tools/testing/cxl/config_check.c

@@ -0,0 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bug.h>

void check(void)
{
	/*
	 * These kconfig symbols must be set to "m" for cxl_test to load
	 * and operate.
	 */
	BUILD_BUG_ON(!IS_MODULE(CONFIG_CXL_BUS));
	BUILD_BUG_ON(!IS_MODULE(CONFIG_CXL_ACPI));
	BUILD_BUG_ON(!IS_MODULE(CONFIG_CXL_PMEM));
}

tools/testing/cxl/mock_acpi.c

@@ -0,0 +1,109 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */

#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <cxl.h>
#include "test/mock.h"

struct acpi_device *to_cxl_host_bridge(struct device *host, struct device *dev)
{
	int index;
	struct acpi_device *adev, *found = NULL;
	struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);

	if (ops && ops->is_mock_bridge(dev)) {
		found = ACPI_COMPANION(dev);
		goto out;
	}

	if (dev->bus == &platform_bus_type)
		goto out;

	adev = to_acpi_device(dev);
	if (!acpi_pci_find_root(adev->handle))
		goto out;

	if (strcmp(acpi_device_hid(adev), "ACPI0016") == 0) {
		found = adev;
		dev_dbg(host, "found host bridge %s\n", dev_name(&adev->dev));
	}
out:
	put_cxl_mock_ops(index);
	return found;
}

static int match_add_root_port(struct pci_dev *pdev, void *data)
{
	struct cxl_walk_context *ctx = data;
	struct pci_bus *root_bus = ctx->root;
	struct cxl_port *port = ctx->port;
	int type = pci_pcie_type(pdev);
	struct device *dev = ctx->dev;
	u32 lnkcap, port_num;
	int rc;

	if (pdev->bus != root_bus)
		return 0;
	if (!pci_is_pcie(pdev))
		return 0;
	if (type != PCI_EXP_TYPE_ROOT_PORT)
		return 0;
	if (pci_read_config_dword(pdev, pci_pcie_cap(pdev) + PCI_EXP_LNKCAP,
				  &lnkcap) != PCIBIOS_SUCCESSFUL)
		return 0;

	/* TODO walk DVSEC to find component register base */
	port_num = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
	rc = cxl_add_dport(port, &pdev->dev, port_num, CXL_RESOURCE_NONE);
	if (rc) {
		dev_err(dev, "failed to add dport: %s (%d)\n",
			dev_name(&pdev->dev), rc);
		ctx->error = rc;
		return rc;
	}
	ctx->count++;

	dev_dbg(dev, "add dport%d: %s\n", port_num, dev_name(&pdev->dev));

	return 0;
}

static int mock_add_root_port(struct platform_device *pdev, void *data)
{
	struct cxl_walk_context *ctx = data;
	struct cxl_port *port = ctx->port;
	struct device *dev = ctx->dev;
	int rc;

	rc = cxl_add_dport(port, &pdev->dev, pdev->id, CXL_RESOURCE_NONE);
	if (rc) {
		dev_err(dev, "failed to add dport: %s (%d)\n",
			dev_name(&pdev->dev), rc);
		ctx->error = rc;
		return rc;
	}
	ctx->count++;

	dev_dbg(dev, "add dport%d: %s\n", pdev->id, dev_name(&pdev->dev));

	return 0;
}

int match_add_root_ports(struct pci_dev *dev, void *data)
{
	int index, rc;
	struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
	struct platform_device *pdev = (struct platform_device *) dev;

	if (ops && ops->is_mock_port(pdev))
		rc = mock_add_root_port(pdev, data);
	else
		rc = match_add_root_port(dev, data);

	put_cxl_mock_ops(index);

	return rc;
}

tools/testing/cxl/mock_pmem.c

@@ -0,0 +1,24 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <cxl.h>
#include "test/mock.h"
#include <core/core.h>

int match_nvdimm_bridge(struct device *dev, const void *data)
{
	int index, rc = 0;
	struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
	const struct cxl_nvdimm *cxl_nvd = data;

	if (ops) {
		if (dev->type == &cxl_nvdimm_bridge_type &&
		    (ops->is_mock_dev(dev->parent->parent) ==
		     ops->is_mock_dev(cxl_nvd->dev.parent->parent)))
			rc = 1;
	} else
		rc = dev->type == &cxl_nvdimm_bridge_type;

	put_cxl_mock_ops(index);

	return rc;
}

tools/testing/cxl/test/Kbuild

@@ -0,0 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
ccflags-y := -I$(srctree)/drivers/cxl/
obj-m += cxl_test.o
obj-m += cxl_mock.o
obj-m += cxl_mock_mem.o
cxl_test-y := cxl.o
cxl_mock-y := mock.o
cxl_mock_mem-y := mem.o

tools/testing/cxl/test/cxl.c

@@ -0,0 +1,576 @@
// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2021 Intel Corporation. All rights reserved.
#include <linux/platform_device.h>
#include <linux/genalloc.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include "mock.h"
#define NR_CXL_HOST_BRIDGES 4
#define NR_CXL_ROOT_PORTS 2
static struct platform_device *cxl_acpi;
static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES];
static struct platform_device
*cxl_root_port[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS];
struct platform_device *cxl_mem[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS];
static struct acpi_device acpi0017_mock;
static struct acpi_device host_bridge[NR_CXL_HOST_BRIDGES] = {
[0] = {
.handle = &host_bridge[0],
},
[1] = {
.handle = &host_bridge[1],
},
[2] = {
.handle = &host_bridge[2],
},
[3] = {
.handle = &host_bridge[3],
},
};
static bool is_mock_dev(struct device *dev)
{
int i;
for (i = 0; i < ARRAY_SIZE(cxl_mem); i++)
if (dev == &cxl_mem[i]->dev)
return true;
if (dev == &cxl_acpi->dev)
return true;
return false;
}
static bool is_mock_adev(struct acpi_device *adev)
{
int i;
if (adev == &acpi0017_mock)
return true;
for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
if (adev == &host_bridge[i])
return true;
return false;
}
static struct {
struct acpi_table_cedt cedt;
struct acpi_cedt_chbs chbs[NR_CXL_HOST_BRIDGES];
struct {
struct acpi_cedt_cfmws cfmws;
u32 target[1];
} cfmws0;
struct {
struct acpi_cedt_cfmws cfmws;
u32 target[4];
} cfmws1;
struct {
struct acpi_cedt_cfmws cfmws;
u32 target[1];
} cfmws2;
struct {
struct acpi_cedt_cfmws cfmws;
u32 target[4];
} cfmws3;
} __packed mock_cedt = {
.cedt = {
.header = {
.signature = "CEDT",
.length = sizeof(mock_cedt),
.revision = 1,
},
},
.chbs[0] = {
.header = {
.type = ACPI_CEDT_TYPE_CHBS,
.length = sizeof(mock_cedt.chbs[0]),
},
.uid = 0,
.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
},
.chbs[1] = {
.header = {
.type = ACPI_CEDT_TYPE_CHBS,
.length = sizeof(mock_cedt.chbs[0]),
},
.uid = 1,
.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
},
.chbs[2] = {
.header = {
.type = ACPI_CEDT_TYPE_CHBS,
.length = sizeof(mock_cedt.chbs[0]),
},
.uid = 2,
.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
},
.chbs[3] = {
.header = {
.type = ACPI_CEDT_TYPE_CHBS,
.length = sizeof(mock_cedt.chbs[0]),
},
.uid = 3,
.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
},
.cfmws0 = {
.cfmws = {
.header = {
.type = ACPI_CEDT_TYPE_CFMWS,
.length = sizeof(mock_cedt.cfmws0),
},
.interleave_ways = 0,
.granularity = 4,
.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
.qtg_id = 0,
.window_size = SZ_256M,
},
.target = { 0 },
},
.cfmws1 = {
.cfmws = {
.header = {
.type = ACPI_CEDT_TYPE_CFMWS,
.length = sizeof(mock_cedt.cfmws1),
},
.interleave_ways = 2,
.granularity = 4,
.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
.qtg_id = 1,
.window_size = SZ_256M * 4,
},
.target = { 0, 1, 2, 3 },
},
.cfmws2 = {
.cfmws = {
.header = {
.type = ACPI_CEDT_TYPE_CFMWS,
.length = sizeof(mock_cedt.cfmws2),
},
.interleave_ways = 0,
.granularity = 4,
.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
ACPI_CEDT_CFMWS_RESTRICT_PMEM,
.qtg_id = 2,
.window_size = SZ_256M,
},
.target = { 0 },
},
.cfmws3 = {
.cfmws = {
.header = {
.type = ACPI_CEDT_TYPE_CFMWS,
.length = sizeof(mock_cedt.cfmws3),
},
.interleave_ways = 2,
.granularity = 4,
.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
ACPI_CEDT_CFMWS_RESTRICT_PMEM,
.qtg_id = 3,
.window_size = SZ_256M * 4,
},
.target = { 0, 1, 2, 3 },
},
};
struct cxl_mock_res {
struct list_head list;
struct range range;
};
static LIST_HEAD(mock_res);
static DEFINE_MUTEX(mock_res_lock);
static struct gen_pool *cxl_mock_pool;
static void depopulate_all_mock_resources(void)
{
struct cxl_mock_res *res, *_res;
mutex_lock(&mock_res_lock);
list_for_each_entry_safe(res, _res, &mock_res, list) {
gen_pool_free(cxl_mock_pool, res->range.start,
range_len(&res->range));
list_del(&res->list);
kfree(res);
}
mutex_unlock(&mock_res_lock);
}
static struct cxl_mock_res *alloc_mock_res(resource_size_t size)
{
struct cxl_mock_res *res = kzalloc(sizeof(*res), GFP_KERNEL);
struct genpool_data_align data = {
.align = SZ_256M,
};
unsigned long phys;
INIT_LIST_HEAD(&res->list);
phys = gen_pool_alloc_algo(cxl_mock_pool, size,
gen_pool_first_fit_align, &data);
if (!phys)
return NULL;
res->range = (struct range) {
.start = phys,
.end = phys + size - 1,
};
mutex_lock(&mock_res_lock);
list_add(&res->list, &mock_res);
mutex_unlock(&mock_res_lock);
return res;
}
static int populate_cedt(void)
{
struct acpi_cedt_cfmws *cfmws[4] = {
[0] = &mock_cedt.cfmws0.cfmws,
[1] = &mock_cedt.cfmws1.cfmws,
[2] = &mock_cedt.cfmws2.cfmws,
[3] = &mock_cedt.cfmws3.cfmws,
};
struct cxl_mock_res *res;
int i;
for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
struct acpi_cedt_chbs *chbs = &mock_cedt.chbs[i];
resource_size_t size;
if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20)
size = ACPI_CEDT_CHBS_LENGTH_CXL20;
else
size = ACPI_CEDT_CHBS_LENGTH_CXL11;
res = alloc_mock_res(size);
if (!res)
return -ENOMEM;
chbs->base = res->range.start;
chbs->length = size;
}
for (i = 0; i < ARRAY_SIZE(cfmws); i++) {
struct acpi_cedt_cfmws *window = cfmws[i];
res = alloc_mock_res(window->window_size);
if (!res)
return -ENOMEM;
window->base_hpa = res->range.start;
}
return 0;
}
static acpi_status mock_acpi_get_table(char *signature, u32 instance,
struct acpi_table_header **out_table)
{
if (instance < U32_MAX || strcmp(signature, ACPI_SIG_CEDT) != 0)
return acpi_get_table(signature, instance, out_table);
*out_table = (struct acpi_table_header *) &mock_cedt;
return AE_OK;
}
static void mock_acpi_put_table(struct acpi_table_header *table)
{
if (table == (struct acpi_table_header *) &mock_cedt)
return;
acpi_put_table(table);
}
static bool is_mock_bridge(struct device *dev)
{
int i;
for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
if (dev == &cxl_host_bridge[i]->dev)
return true;
return false;
}
static int host_bridge_index(struct acpi_device *adev)
{
return adev - host_bridge;
}
static struct acpi_device *find_host_bridge(acpi_handle handle)
{
int i;
for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
if (handle == host_bridge[i].handle)
return &host_bridge[i];
return NULL;
}
static acpi_status
mock_acpi_evaluate_integer(acpi_handle handle, acpi_string pathname,
struct acpi_object_list *arguments,
unsigned long long *data)
{
struct acpi_device *adev = find_host_bridge(handle);
if (!adev || strcmp(pathname, METHOD_NAME__UID) != 0)
return acpi_evaluate_integer(handle, pathname, arguments, data);
*data = host_bridge_index(adev);
return AE_OK;
}
static struct pci_bus mock_pci_bus[NR_CXL_HOST_BRIDGES];
static struct acpi_pci_root mock_pci_root[NR_CXL_HOST_BRIDGES] = {
[0] = {
.bus = &mock_pci_bus[0],
},
[1] = {
.bus = &mock_pci_bus[1],
},
[2] = {
.bus = &mock_pci_bus[2],
},
[3] = {
.bus = &mock_pci_bus[3],
},
};
static struct platform_device *mock_cxl_root_port(struct pci_bus *bus, int index)
{
int i;
for (i = 0; i < ARRAY_SIZE(mock_pci_bus); i++)
if (bus == &mock_pci_bus[i])
return cxl_root_port[index + i * NR_CXL_ROOT_PORTS];
return NULL;
}
static bool is_mock_port(struct platform_device *pdev)
{
int i;
for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++)
if (pdev == cxl_root_port[i])
return true;
return false;
}
static bool is_mock_bus(struct pci_bus *bus)
{
int i;
for (i = 0; i < ARRAY_SIZE(mock_pci_bus); i++)
if (bus == &mock_pci_bus[i])
return true;
return false;
}
static struct acpi_pci_root *mock_acpi_pci_find_root(acpi_handle handle)
{
struct acpi_device *adev = find_host_bridge(handle);
if (!adev)
return acpi_pci_find_root(handle);
return &mock_pci_root[host_bridge_index(adev)];
}
static struct cxl_mock_ops cxl_mock_ops = {
.is_mock_adev = is_mock_adev,
.is_mock_bridge = is_mock_bridge,
.is_mock_bus = is_mock_bus,
.is_mock_port = is_mock_port,
.is_mock_dev = is_mock_dev,
.mock_port = mock_cxl_root_port,
.acpi_get_table = mock_acpi_get_table,
.acpi_put_table = mock_acpi_put_table,
.acpi_evaluate_integer = mock_acpi_evaluate_integer,
.acpi_pci_find_root = mock_acpi_pci_find_root,
.list = LIST_HEAD_INIT(cxl_mock_ops.list),
};
static void mock_companion(struct acpi_device *adev, struct device *dev)
{
device_initialize(&adev->dev);
fwnode_init(&adev->fwnode, NULL);
dev->fwnode = &adev->fwnode;
adev->fwnode.dev = dev;
}
#ifndef SZ_64G
#define SZ_64G (SZ_32G * 2)
#endif
#ifndef SZ_512G
#define SZ_512G (SZ_64G * 8)
#endif
static struct platform_device *alloc_memdev(int id)
{
struct resource res[] = {
[0] = {
.flags = IORESOURCE_MEM,
},
[1] = {
.flags = IORESOURCE_MEM,
.desc = IORES_DESC_PERSISTENT_MEMORY,
},
};
struct platform_device *pdev;
int i, rc;
for (i = 0; i < ARRAY_SIZE(res); i++) {
struct cxl_mock_res *r = alloc_mock_res(SZ_256M);
if (!r)
return NULL;
res[i].start = r->range.start;
res[i].end = r->range.end;
}
pdev = platform_device_alloc("cxl_mem", id);
if (!pdev)
return NULL;
rc = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
if (rc)
goto err;
return pdev;
err:
platform_device_put(pdev);
return NULL;
}
static __init int cxl_test_init(void)
{
int rc, i;
register_cxl_mock_ops(&cxl_mock_ops);
cxl_mock_pool = gen_pool_create(ilog2(SZ_2M), NUMA_NO_NODE);
if (!cxl_mock_pool) {
rc = -ENOMEM;
goto err_gen_pool_create;
}
rc = gen_pool_add(cxl_mock_pool, SZ_512G, SZ_64G, NUMA_NO_NODE);
if (rc)
goto err_gen_pool_add;
rc = populate_cedt();
if (rc)
goto err_populate;
for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++) {
struct acpi_device *adev = &host_bridge[i];
struct platform_device *pdev;
pdev = platform_device_alloc("cxl_host_bridge", i);
if (!pdev)
goto err_bridge;
mock_companion(adev, &pdev->dev);
rc = platform_device_add(pdev);
if (rc) {
platform_device_put(pdev);
goto err_bridge;
}
cxl_host_bridge[i] = pdev;
}
for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++) {
struct platform_device *bridge =
cxl_host_bridge[i / NR_CXL_ROOT_PORTS];
struct platform_device *pdev;
pdev = platform_device_alloc("cxl_root_port", i);
if (!pdev)
goto err_port;
pdev->dev.parent = &bridge->dev;
rc = platform_device_add(pdev);
if (rc) {
platform_device_put(pdev);
goto err_port;
}
cxl_root_port[i] = pdev;
}
BUILD_BUG_ON(ARRAY_SIZE(cxl_mem) != ARRAY_SIZE(cxl_root_port));
for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) {
struct platform_device *port = cxl_root_port[i];
struct platform_device *pdev;
pdev = alloc_memdev(i);
if (!pdev)
goto err_mem;
pdev->dev.parent = &port->dev;
rc = platform_device_add(pdev);
if (rc) {
platform_device_put(pdev);
goto err_mem;
}
cxl_mem[i] = pdev;
}
cxl_acpi = platform_device_alloc("cxl_acpi", 0);
if (!cxl_acpi)
goto err_mem;
mock_companion(&acpi0017_mock, &cxl_acpi->dev);
acpi0017_mock.dev.bus = &platform_bus_type;
rc = platform_device_add(cxl_acpi);
if (rc)
goto err_add;
return 0;
err_add:
platform_device_put(cxl_acpi);
err_mem:
for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
platform_device_unregister(cxl_mem[i]);
err_port:
for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
platform_device_unregister(cxl_root_port[i]);
err_bridge:
for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--)
platform_device_unregister(cxl_host_bridge[i]);
err_populate:
depopulate_all_mock_resources();
err_gen_pool_add:
gen_pool_destroy(cxl_mock_pool);
err_gen_pool_create:
unregister_cxl_mock_ops(&cxl_mock_ops);
return rc;
}
static __exit void cxl_test_exit(void)
{
int i;
platform_device_unregister(cxl_acpi);
for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
platform_device_unregister(cxl_mem[i]);
for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
platform_device_unregister(cxl_root_port[i]);
for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--)
platform_device_unregister(cxl_host_bridge[i]);
depopulate_all_mock_resources();
gen_pool_destroy(cxl_mock_pool);
unregister_cxl_mock_ops(&cxl_mock_ops);
}
module_init(cxl_test_init);
module_exit(cxl_test_exit);
MODULE_LICENSE("GPL v2");

tools/testing/cxl/test/mem.c

@@ -0,0 +1,256 @@
// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2021 Intel Corporation. All rights reserved.
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/bits.h>
#include <cxlmem.h>
#define LSA_SIZE SZ_128K
#define EFFECT(x) (1U << x)
static struct cxl_cel_entry mock_cel[] = {
{
.opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_LOGS),
.effect = cpu_to_le16(0),
},
{
.opcode = cpu_to_le16(CXL_MBOX_OP_IDENTIFY),
.effect = cpu_to_le16(0),
},
{
.opcode = cpu_to_le16(CXL_MBOX_OP_GET_LSA),
.effect = cpu_to_le16(0),
},
{
.opcode = cpu_to_le16(CXL_MBOX_OP_SET_LSA),
.effect = cpu_to_le16(EFFECT(1) | EFFECT(2)),
},
};
static struct {
struct cxl_mbox_get_supported_logs gsl;
struct cxl_gsl_entry entry;
} mock_gsl_payload = {
.gsl = {
.entries = cpu_to_le16(1),
},
.entry = {
.uuid = DEFINE_CXL_CEL_UUID,
.size = cpu_to_le32(sizeof(mock_cel)),
},
};
static int mock_gsl(struct cxl_mbox_cmd *cmd)
{
if (cmd->size_out < sizeof(mock_gsl_payload))
return -EINVAL;
memcpy(cmd->payload_out, &mock_gsl_payload, sizeof(mock_gsl_payload));
cmd->size_out = sizeof(mock_gsl_payload);
return 0;
}
static int mock_get_log(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
{
struct cxl_mbox_get_log *gl = cmd->payload_in;
u32 offset = le32_to_cpu(gl->offset);
u32 length = le32_to_cpu(gl->length);
uuid_t uuid = DEFINE_CXL_CEL_UUID;
void *data = &mock_cel;
if (cmd->size_in < sizeof(*gl))
return -EINVAL;
if (length > cxlm->payload_size)
return -EINVAL;
if (offset + length > sizeof(mock_cel))
return -EINVAL;
if (!uuid_equal(&gl->uuid, &uuid))
return -EINVAL;
if (length > cmd->size_out)
return -EINVAL;
memcpy(cmd->payload_out, data + offset, length);
return 0;
}
static int mock_id(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
{
struct platform_device *pdev = to_platform_device(cxlm->dev);
struct cxl_mbox_identify id = {
.fw_revision = { "mock fw v1 " },
.lsa_size = cpu_to_le32(LSA_SIZE),
/* FIXME: Add partition support */
.partition_align = cpu_to_le64(0),
};
u64 capacity = 0;
int i;
if (cmd->size_out < sizeof(id))
return -EINVAL;
for (i = 0; i < 2; i++) {
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
if (!res)
break;
capacity += resource_size(res) / CXL_CAPACITY_MULTIPLIER;
if (le64_to_cpu(id.partition_align))
continue;
if (res->desc == IORES_DESC_PERSISTENT_MEMORY)
id.persistent_capacity = cpu_to_le64(
resource_size(res) / CXL_CAPACITY_MULTIPLIER);
else
id.volatile_capacity = cpu_to_le64(
resource_size(res) / CXL_CAPACITY_MULTIPLIER);
}
id.total_capacity = cpu_to_le64(capacity);
memcpy(cmd->payload_out, &id, sizeof(id));
return 0;
}
static int mock_get_lsa(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
{
struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in;
void *lsa = dev_get_drvdata(cxlm->dev);
u32 offset, length;
if (sizeof(*get_lsa) > cmd->size_in)
return -EINVAL;
offset = le32_to_cpu(get_lsa->offset);
length = le32_to_cpu(get_lsa->length);
if (offset + length > LSA_SIZE)
return -EINVAL;
if (length > cmd->size_out)
return -EINVAL;
memcpy(cmd->payload_out, lsa + offset, length);
return 0;
}
static int mock_set_lsa(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
{
struct cxl_mbox_set_lsa *set_lsa = cmd->payload_in;
void *lsa = dev_get_drvdata(cxlm->dev);
u32 offset, length;
if (sizeof(*set_lsa) > cmd->size_in)
return -EINVAL;
offset = le32_to_cpu(set_lsa->offset);
length = cmd->size_in - sizeof(*set_lsa);
if (offset + length > LSA_SIZE)
return -EINVAL;
memcpy(lsa + offset, &set_lsa->data[0], length);
return 0;
}
static int cxl_mock_mbox_send(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
{
struct device *dev = cxlm->dev;
int rc = -EIO;
switch (cmd->opcode) {
case CXL_MBOX_OP_GET_SUPPORTED_LOGS:
rc = mock_gsl(cmd);
break;
case CXL_MBOX_OP_GET_LOG:
rc = mock_get_log(cxlm, cmd);
break;
case CXL_MBOX_OP_IDENTIFY:
rc = mock_id(cxlm, cmd);
break;
case CXL_MBOX_OP_GET_LSA:
rc = mock_get_lsa(cxlm, cmd);
break;
case CXL_MBOX_OP_SET_LSA:
rc = mock_set_lsa(cxlm, cmd);
break;
default:
break;
}
dev_dbg(dev, "opcode: %#x sz_in: %zd sz_out: %zd rc: %d\n", cmd->opcode,
cmd->size_in, cmd->size_out, rc);
return rc;
}
static void label_area_release(void *lsa)
{
vfree(lsa);
}
static int cxl_mock_mem_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct cxl_memdev *cxlmd;
struct cxl_mem *cxlm;
void *lsa;
int rc;
lsa = vmalloc(LSA_SIZE);
if (!lsa)
return -ENOMEM;
rc = devm_add_action_or_reset(dev, label_area_release, lsa);
if (rc)
return rc;
dev_set_drvdata(dev, lsa);
cxlm = cxl_mem_create(dev);
if (IS_ERR(cxlm))
return PTR_ERR(cxlm);
cxlm->mbox_send = cxl_mock_mbox_send;
cxlm->payload_size = SZ_4K;
rc = cxl_mem_enumerate_cmds(cxlm);
if (rc)
return rc;
rc = cxl_mem_identify(cxlm);
if (rc)
return rc;
rc = cxl_mem_create_range_info(cxlm);
if (rc)
return rc;
cxlmd = devm_cxl_add_memdev(cxlm);
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);
if (range_len(&cxlm->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM))
rc = devm_cxl_add_nvdimm(dev, cxlmd);
return 0;
}
static const struct platform_device_id cxl_mock_mem_ids[] = {
{ .name = "cxl_mem", },
{ },
};
MODULE_DEVICE_TABLE(platform, cxl_mock_mem_ids);
static struct platform_driver cxl_mock_mem_driver = {
.probe = cxl_mock_mem_probe,
.id_table = cxl_mock_mem_ids,
.driver = {
.name = KBUILD_MODNAME,
},
};
module_platform_driver(cxl_mock_mem_driver);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);

tools/testing/cxl/test/mock.c

@@ -0,0 +1,171 @@
// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2021 Intel Corporation. All rights reserved.

#include <linux/libnvdimm.h>
#include <linux/rculist.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include "mock.h"

static LIST_HEAD(mock);

void register_cxl_mock_ops(struct cxl_mock_ops *ops)
{
	list_add_rcu(&ops->list, &mock);
}
EXPORT_SYMBOL_GPL(register_cxl_mock_ops);

static DEFINE_SRCU(cxl_mock_srcu);

void unregister_cxl_mock_ops(struct cxl_mock_ops *ops)
{
	list_del_rcu(&ops->list);
	synchronize_srcu(&cxl_mock_srcu);
}
EXPORT_SYMBOL_GPL(unregister_cxl_mock_ops);

struct cxl_mock_ops *get_cxl_mock_ops(int *index)
{
	*index = srcu_read_lock(&cxl_mock_srcu);
	return list_first_or_null_rcu(&mock, struct cxl_mock_ops, list);
}
EXPORT_SYMBOL_GPL(get_cxl_mock_ops);

void put_cxl_mock_ops(int index)
{
	srcu_read_unlock(&cxl_mock_srcu, index);
}
EXPORT_SYMBOL_GPL(put_cxl_mock_ops);

bool __wrap_is_acpi_device_node(const struct fwnode_handle *fwnode)
{
	struct acpi_device *adev =
		container_of(fwnode, struct acpi_device, fwnode);
	int index;
	struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
	bool retval = false;

	if (ops)
		retval = ops->is_mock_adev(adev);

	if (!retval)
		retval = is_acpi_device_node(fwnode);

	put_cxl_mock_ops(index);

	return retval;
}
EXPORT_SYMBOL(__wrap_is_acpi_device_node);

acpi_status __wrap_acpi_get_table(char *signature, u32 instance,
				  struct acpi_table_header **out_table)
{
	int index;
	struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
	acpi_status status;

	if (ops)
		status = ops->acpi_get_table(signature, instance, out_table);
	else
		status = acpi_get_table(signature, instance, out_table);

	put_cxl_mock_ops(index);

	return status;
}
EXPORT_SYMBOL(__wrap_acpi_get_table);

void __wrap_acpi_put_table(struct acpi_table_header *table)
{
	int index;
	struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);

	if (ops)
		ops->acpi_put_table(table);
	else
		acpi_put_table(table);
	put_cxl_mock_ops(index);
}
EXPORT_SYMBOL(__wrap_acpi_put_table);

acpi_status __wrap_acpi_evaluate_integer(acpi_handle handle,
					 acpi_string pathname,
					 struct acpi_object_list *arguments,
					 unsigned long long *data)
{
	int index;
	struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
	acpi_status status;

	if (ops)
		status = ops->acpi_evaluate_integer(handle, pathname, arguments,
						    data);
	else
		status = acpi_evaluate_integer(handle, pathname, arguments,
					       data);
	put_cxl_mock_ops(index);

	return status;
}
EXPORT_SYMBOL(__wrap_acpi_evaluate_integer);

struct acpi_pci_root *__wrap_acpi_pci_find_root(acpi_handle handle)
{
	int index;
	struct acpi_pci_root *root;
	struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);

	if (ops)
		root = ops->acpi_pci_find_root(handle);
	else
		root = acpi_pci_find_root(handle);

	put_cxl_mock_ops(index);

	return root;
}
EXPORT_SYMBOL_GPL(__wrap_acpi_pci_find_root);

void __wrap_pci_walk_bus(struct pci_bus *bus,
			 int (*cb)(struct pci_dev *, void *), void *userdata)
{
	int index;
	struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);

	if (ops && ops->is_mock_bus(bus)) {
		int rc, i;

		/*
		 * Simulate 2 root ports per host-bridge and no
		 * depth recursion.
		 */
		for (i = 0; i < 2; i++) {
			rc = cb((struct pci_dev *) ops->mock_port(bus, i),
				userdata);
			if (rc)
				break;
		}
	} else
		pci_walk_bus(bus, cb, userdata);

	put_cxl_mock_ops(index);
}
EXPORT_SYMBOL_GPL(__wrap_pci_walk_bus);

struct nvdimm_bus *
__wrap_nvdimm_bus_register(struct device *dev,
			   struct nvdimm_bus_descriptor *nd_desc)
{
	int index;
	struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);

	if (ops && ops->is_mock_dev(dev->parent->parent))
		nd_desc->provider_name = "cxl_test";

	put_cxl_mock_ops(index);

	return nvdimm_bus_register(dev, nd_desc);
}
EXPORT_SYMBOL_GPL(__wrap_nvdimm_bus_register);

MODULE_LICENSE("GPL v2");

tools/testing/cxl/test/mock.h

@@ -0,0 +1,27 @@
/* SPDX-License-Identifier: GPL-2.0 */

#include <linux/list.h>
#include <linux/acpi.h>

struct cxl_mock_ops {
	struct list_head list;
	bool (*is_mock_adev)(struct acpi_device *dev);
	acpi_status (*acpi_get_table)(char *signature, u32 instance,
				      struct acpi_table_header **out_table);
	void (*acpi_put_table)(struct acpi_table_header *table);
	bool (*is_mock_bridge)(struct device *dev);
	acpi_status (*acpi_evaluate_integer)(acpi_handle handle,
					     acpi_string pathname,
					     struct acpi_object_list *arguments,
					     unsigned long long *data);
	struct acpi_pci_root *(*acpi_pci_find_root)(acpi_handle handle);
	struct platform_device *(*mock_port)(struct pci_bus *bus, int index);
	bool (*is_mock_bus)(struct pci_bus *bus);
	bool (*is_mock_port)(struct platform_device *pdev);
	bool (*is_mock_dev)(struct device *dev);
};

void register_cxl_mock_ops(struct cxl_mock_ops *ops);
void unregister_cxl_mock_ops(struct cxl_mock_ops *ops);
struct cxl_mock_ops *get_cxl_mock_ops(int *index);
void put_cxl_mock_ops(int index);