linux/drivers/cxl/mem.c
Dan Williams b39cb1052a cxl/mem: Register CXL memX devices
Create the /sys/bus/cxl hierarchy to enumerate:

* Memory Devices (per-endpoint control devices)

* Memory Address Space Devices (platform address ranges with
  interleaving, performance, and persistence attributes)

* Memory Regions (active provisioned memory from an address space device
  that is in use as System RAM or delegated to libnvdimm as Persistent
  Memory regions).

For now, only the per-endpoint control devices are registered on the
'cxl' bus. However, going forward it will provide a mechanism to
coordinate cross-device interleave.

Signed-off-by: Ben Widawsky <ben.widawsky@intel.com>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com> (v2)
Link: https://lore.kernel.org/r/20210217040958.1354670-4-ben.widawsky@intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2021-02-16 20:36:38 -08:00
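
With the memX device registration below in place, the resulting layout is
roughly the following (illustrative; names are taken from the attributes and
devnode callback defined in the code):

    /sys/bus/cxl/devices/mem0/firmware_version
    /sys/bus/cxl/devices/mem0/payload_max
    /sys/bus/cxl/devices/mem0/ram/size
    /sys/bus/cxl/devices/mem0/pmem/size
    /dev/cxl/mem0    (character device backing the ioctl interface)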


// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include "pci.h"
#include "cxl.h"

/**
 * DOC: cxl mem
 *
 * This implements a CXL memory device ("type-3") as it is defined by the
 * Compute Express Link specification.
 *
 * The driver has several responsibilities, mainly:
 *  - Create the memX device and register on the CXL bus.
 *  - Enumerate the device's register interface and map the registers.
 *  - Probe the device attributes to establish the sysfs interface.
 *  - Provide an IOCTL interface to userspace to communicate with the
 *    device for things like firmware update.
 *  - Support management of interleave sets.
 *  - Handle and manage error conditions.
 */

/*
 * An entire PCI topology full of devices should be enough for any
 * config
 */
#define CXL_MEM_MAX_DEVS 65536

#define cxl_doorbell_busy(cxlm) \
        (readl((cxlm)->mbox_regs + CXLDEV_MBOX_CTRL_OFFSET) & \
         CXLDEV_MBOX_CTRL_DOORBELL)

/*
 * CXL 2.0 - 8.2.8.4. Note that despite the _MS suffix, the value is in
 * jiffies (2 * HZ == 2 seconds); it is only ever compared against jiffies
 * in cxl_mem_wait_for_doorbell().
 */
#define CXL_MAILBOX_TIMEOUT_MS (2 * HZ)

enum opcode {
        CXL_MBOX_OP_IDENTIFY = 0x4000,
        CXL_MBOX_OP_MAX = 0x10000
};

/**
 * struct mbox_cmd - A command to be submitted to hardware.
 * @opcode: (input) The command set and command submitted to hardware.
 * @payload_in: (input) Pointer to the input payload.
 * @payload_out: (output) Pointer to the output payload. Must be allocated by
 *		 the caller.
 * @size_in: (input) Number of bytes to load from @payload_in.
 * @size_out: (input) Max number of bytes loaded into @payload_out.
 *	      (output) Number of bytes generated by the device. For fixed size
 *	      output commands this is always expected to be deterministic. For
 *	      variable sized output commands, it tells the exact number of bytes
 *	      written.
 * @return_code: (output) Error code returned from hardware.
 *
 * This is the primary mechanism used to send commands to the hardware.
 * All the fields except @payload_* correspond exactly to the fields described
 * in the Command Register section of the CXL 2.0 spec, 8.2.8.4.5. @payload_in
 * and @payload_out are written to, and read from the Command Payload Registers
 * defined in CXL 2.0 8.2.8.4.8.
 */
struct mbox_cmd {
        u16 opcode;
        void *payload_in;
        void *payload_out;
        size_t size_in;
        size_t size_out;
        u16 return_code;
#define CXL_MBOX_SUCCESS 0
};

/**
 * struct cxl_memdev - CXL bus object representing a Type-3 Memory Device
 * @dev: driver core device object
 * @cdev: char dev core object for ioctl operations
 * @cxlm: pointer to the parent device driver data
 * @ops_active: active user of @cxlm in ops handlers
 * @ops_dead: completion when all @cxlm ops users have exited
 * @id: id number of this memdev instance.
 */
struct cxl_memdev {
        struct device dev;
        struct cdev cdev;
        struct cxl_mem *cxlm;
        struct percpu_ref ops_active;
        struct completion ops_dead;
        int id;
};

static int cxl_mem_major;
static DEFINE_IDA(cxl_memdev_ida);
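
/*
 * Poll, within the 2 second timeout, for the mailbox doorbell to clear.
 * cpu_relax() keeps the busy-wait polite; the re-check after the timeout
 * test guards against a preemption window between the last poll and the
 * timeout evaluation.
 */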
static int cxl_mem_wait_for_doorbell(struct cxl_mem *cxlm)
{
        const unsigned long start = jiffies;
        unsigned long end = start;

        while (cxl_doorbell_busy(cxlm)) {
                end = jiffies;

                if (time_after(end, start + CXL_MAILBOX_TIMEOUT_MS)) {
                        /* Check again in case preempted before timeout test */
                        if (!cxl_doorbell_busy(cxlm))
                                break;
                        return -ETIMEDOUT;
                }
                cpu_relax();
        }

        dev_dbg(&cxlm->pdev->dev, "Doorbell wait took %dms",
                jiffies_to_msecs(end) - jiffies_to_msecs(start));
        return 0;
}

static void cxl_mem_mbox_timeout(struct cxl_mem *cxlm,
                                 struct mbox_cmd *mbox_cmd)
{
        struct device *dev = &cxlm->pdev->dev;

        dev_dbg(dev, "Mailbox command (opcode: %#x size: %zub) timed out\n",
                mbox_cmd->opcode, mbox_cmd->size_in);
}

/**
 * __cxl_mem_mbox_send_cmd() - Execute a mailbox command
 * @cxlm: The CXL memory device to communicate with.
 * @mbox_cmd: Command to send to the memory device.
 *
 * Context: Any context. Expects mbox_mutex to be held.
 * Return: -ETIMEDOUT if timeout occurred waiting for completion. 0 on success.
 *         Caller should check the return code in @mbox_cmd to make sure it
 *         succeeded.
 *
 * This is a generic form of the CXL mailbox send command that only uses the
 * registers defined by the mailbox capability ID - CXL 2.0 8.2.8.4. Memory
 * devices, and perhaps other types of CXL devices, may have further
 * information available upon error conditions. Driver facilities wishing to
 * send mailbox commands should use the wrapper, cxl_mem_mbox_send_cmd().
 *
 * The CXL spec allows for up to two mailboxes. The intention is for the
 * primary mailbox to be OS controlled and the secondary mailbox to be used
 * by system firmware. This allows the OS and firmware to communicate with
 * the device without needing to coordinate with each other. The driver only
 * uses the primary mailbox.
 */
static int __cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm,
                                   struct mbox_cmd *mbox_cmd)
{
        void __iomem *payload = cxlm->mbox_regs + CXLDEV_MBOX_PAYLOAD_OFFSET;
        u64 cmd_reg, status_reg;
        size_t out_len;
        int rc;

        lockdep_assert_held(&cxlm->mbox_mutex);

        /*
         * Here are the steps from 8.2.8.4 of the CXL 2.0 spec.
         *   1. Caller reads MB Control Register to verify doorbell is clear
         *   2. Caller writes Command Register
         *   3. Caller writes Command Payload Registers if input payload is
         *      non-empty
         *   4. Caller writes MB Control Register to set doorbell
         *   5. Caller either polls for doorbell to be clear or waits for
         *      interrupt if configured
         *   6. Caller reads MB Status Register to fetch Return code
         *   7. If command successful, Caller reads Command Register to get
         *      Payload Length
         *   8. If output payload is non-empty, host reads Command Payload
         *      Registers
         *
         * Hardware is free to do whatever it wants before the doorbell is
         * rung, and isn't allowed to change anything after it clears the
         * doorbell. As such, steps 2 and 3 can happen in any order, and
         * steps 6, 7, 8 can also happen in any order (though some orders
         * might not make sense).
         */

        /* #1 */
        if (cxl_doorbell_busy(cxlm)) {
                dev_err_ratelimited(&cxlm->pdev->dev,
                                    "Mailbox re-busy after acquiring\n");
                return -EBUSY;
        }

        cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK,
                             mbox_cmd->opcode);
        if (mbox_cmd->size_in) {
                if (WARN_ON(!mbox_cmd->payload_in))
                        return -EINVAL;

                cmd_reg |= FIELD_PREP(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK,
                                      mbox_cmd->size_in);
                memcpy_toio(payload, mbox_cmd->payload_in, mbox_cmd->size_in);
        }

        /* #2, #3 */
        writeq(cmd_reg, cxlm->mbox_regs + CXLDEV_MBOX_CMD_OFFSET);

        /* #4 */
        dev_dbg(&cxlm->pdev->dev, "Sending command\n");
        writel(CXLDEV_MBOX_CTRL_DOORBELL,
               cxlm->mbox_regs + CXLDEV_MBOX_CTRL_OFFSET);

        /* #5 */
        rc = cxl_mem_wait_for_doorbell(cxlm);
        if (rc == -ETIMEDOUT) {
                cxl_mem_mbox_timeout(cxlm, mbox_cmd);
                return rc;
        }

        /* #6 */
        status_reg = readq(cxlm->mbox_regs + CXLDEV_MBOX_STATUS_OFFSET);
        mbox_cmd->return_code =
                FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);

        if (mbox_cmd->return_code != 0) {
                dev_dbg(&cxlm->pdev->dev, "Mailbox operation had an error\n");
                return 0;
        }

        /* #7 */
        cmd_reg = readq(cxlm->mbox_regs + CXLDEV_MBOX_CMD_OFFSET);
        out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg);

        /* #8 */
        if (out_len && mbox_cmd->payload_out) {
                /*
                 * Sanitize the copy. If hardware misbehaves, out_len per the
                 * spec can actually be greater than the max allowed size (21
                 * bits available but spec defined 1M max). The caller also may
                 * have requested less data than the hardware supplied even
                 * within spec.
                 */
                size_t n = min3(mbox_cmd->size_out, cxlm->payload_size, out_len);

                memcpy_fromio(mbox_cmd->payload_out, payload, n);
                mbox_cmd->size_out = n;
        } else {
                mbox_cmd->size_out = 0;
        }

        return 0;
}

/**
 * cxl_mem_mbox_get() - Acquire exclusive access to the mailbox.
 * @cxlm: The memory device to gain access to.
 *
 * Context: Any context. Takes the mbox_mutex.
 * Return: 0 if exclusive access was acquired.
 */
static int cxl_mem_mbox_get(struct cxl_mem *cxlm)
{
        struct device *dev = &cxlm->pdev->dev;
        u64 md_status;
        int rc;

        mutex_lock_io(&cxlm->mbox_mutex);

        /*
         * XXX: There is some amount of ambiguity in the 2.0 version of the
         * spec around the mailbox interface ready (8.2.8.5.1.1). The purpose
         * of the bit is to allow firmware running on the device to notify
         * the driver that it's ready to receive commands. It is unclear if
         * the bit needs to be read for each mailbox transaction, i.e.
         * whether the firmware can switch it on and off as needed. Second,
         * there is no defined timeout for mailbox ready, like there is for
         * the doorbell interface.
         *
         * Assumptions:
         * 1. The firmware might toggle the Mailbox Interface Ready bit, so
         *    check it for every command.
         *
         * 2. If the doorbell is clear, the firmware should have first set
         *    the Mailbox Interface Ready bit. Therefore, waiting for the
         *    doorbell to be ready is sufficient.
         */
        rc = cxl_mem_wait_for_doorbell(cxlm);
        if (rc) {
                dev_warn(dev, "Mailbox interface not ready\n");
                goto out;
        }

        md_status = readq(cxlm->memdev_regs + CXLMDEV_STATUS_OFFSET);
        if (!(md_status & CXLMDEV_MBOX_IF_READY && CXLMDEV_READY(md_status))) {
                dev_err(dev, "mbox: reported doorbell ready, but not mbox ready\n");
                rc = -EBUSY;
                goto out;
        }

        /*
         * Hardware shouldn't allow a ready status while also having failure
         * bits set. Spit out an error; this should prompt a bug report.
         */
        rc = -EFAULT;
        if (md_status & CXLMDEV_DEV_FATAL) {
                dev_err(dev, "mbox: reported ready, but fatal\n");
                goto out;
        }
        if (md_status & CXLMDEV_FW_HALT) {
                dev_err(dev, "mbox: reported ready, but halted\n");
                goto out;
        }
        if (CXLMDEV_RESET_NEEDED(md_status)) {
                dev_err(dev, "mbox: reported ready, but reset needed\n");
                goto out;
        }

        /* with lock held */
        return 0;

out:
        mutex_unlock(&cxlm->mbox_mutex);
        return rc;
}

/**
 * cxl_mem_mbox_put() - Release exclusive access to the mailbox.
 * @cxlm: The CXL memory device to communicate with.
 *
 * Context: Any context. Expects mbox_mutex to be held.
 */
static void cxl_mem_mbox_put(struct cxl_mem *cxlm)
{
        mutex_unlock(&cxlm->mbox_mutex);
}

static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg)
{
        struct cxl_memdev *cxlmd;
        struct inode *inode;
        int rc = -ENOTTY;

        inode = file_inode(file);
        cxlmd = container_of(inode->i_cdev, typeof(*cxlmd), cdev);

        /* Hold off driver unbind while an ioctl is in flight */
        if (!percpu_ref_tryget_live(&cxlmd->ops_active))
                return -ENXIO;

        /* TODO: ioctl body */

        percpu_ref_put(&cxlmd->ops_active);
        return rc;
}

static const struct file_operations cxl_memdev_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = cxl_memdev_ioctl,
        .compat_ioctl = compat_ptr_ioctl,
        .llseek = noop_llseek,
};

/**
 * cxl_mem_mbox_send_cmd() - Send a mailbox command to a memory device.
 * @cxlm: The CXL memory device to communicate with.
 * @opcode: Opcode for the mailbox command.
 * @in: The input payload for the mailbox command.
 * @in_size: The length of the input payload.
 * @out: Caller allocated buffer for the output.
 * @out_size: Expected size of output.
 *
 * Context: Any context. Will acquire and release mbox_mutex.
 * Return:
 *  * %0	- Mailbox transaction completed successfully.
 *  * %-E2BIG	- Payload is too large for hardware.
 *  * %-EBUSY	- Couldn't acquire exclusive mailbox access.
 *  * %-EFAULT	- Hardware error occurred.
 *  * %-ENXIO	- Command completed, but device reported an error.
 *  * %-EIO	- Unexpected output size.
 *
 * Mailbox commands may execute successfully yet the device itself may report
 * an error. While this distinction can be useful for commands from userspace,
 * the kernel will only be able to use results when both are successful. It's
 * expected that all callers of this function know exactly the size of the
 * data they will consume from the hardware.
 *
 * See __cxl_mem_mbox_send_cmd()
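 *
 * A minimal usage sketch (the opcode and output layout here are hypothetical,
 * for illustration only; cxl_mem_identify() below is the real in-tree caller):
 *
 *	__le32 field;
 *	int rc;
 *
 *	rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_FIELD, NULL, 0,
 *				   &field, sizeof(field));
 *	if (rc)
 *		return rc;
 *	consume(le32_to_cpu(field));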
 */
static int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode,
                                 void *in, size_t in_size,
                                 void *out, size_t out_size)
{
        struct mbox_cmd mbox_cmd = {
                .opcode = opcode,
                .payload_in = in,
                .size_in = in_size,
                .size_out = out_size,
                .payload_out = out,
        };
        int rc;

        if (out_size > cxlm->payload_size)
                return -E2BIG;

        rc = cxl_mem_mbox_get(cxlm);
        if (rc)
                return rc;

        rc = __cxl_mem_mbox_send_cmd(cxlm, &mbox_cmd);
        cxl_mem_mbox_put(cxlm);
        if (rc)
                return rc;

        /* TODO: Map return code to proper kernel style errno */
        if (mbox_cmd.return_code != CXL_MBOX_SUCCESS)
                return -ENXIO;

        if (mbox_cmd.size_out != out_size)
                return -EIO;

        return 0;
}

/**
 * cxl_mem_setup_regs() - Setup necessary MMIO.
 * @cxlm: The CXL memory device to communicate with.
 *
 * Return: 0 if all necessary registers were mapped.
 *
 * A memory device is required by spec to implement a certain set of MMIO
 * regions. The purpose of this function is to enumerate and map those
 * regions.
 */
static int cxl_mem_setup_regs(struct cxl_mem *cxlm)
{
        struct device *dev = &cxlm->pdev->dev;
        int cap, cap_count;
        u64 cap_array;

        cap_array = readq(cxlm->regs + CXLDEV_CAP_ARRAY_OFFSET);
        if (FIELD_GET(CXLDEV_CAP_ARRAY_ID_MASK, cap_array) !=
            CXLDEV_CAP_ARRAY_CAP_ID)
                return -ENODEV;

        cap_count = FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK, cap_array);

        for (cap = 1; cap <= cap_count; cap++) {
                void __iomem *register_block;
                u32 offset;
                u16 cap_id;

                /* Capability headers are 16 bytes: id, then offset at +4 */
                cap_id = FIELD_GET(CXLDEV_CAP_HDR_CAP_ID_MASK,
                                   readl(cxlm->regs + cap * 0x10));
                offset = readl(cxlm->regs + cap * 0x10 + 0x4);
                register_block = cxlm->regs + offset;

                switch (cap_id) {
                case CXLDEV_CAP_CAP_ID_DEVICE_STATUS:
                        dev_dbg(dev, "found Status capability (0x%x)\n", offset);
                        cxlm->status_regs = register_block;
                        break;
                case CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX:
                        dev_dbg(dev, "found Mailbox capability (0x%x)\n", offset);
                        cxlm->mbox_regs = register_block;
                        break;
                case CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX:
                        dev_dbg(dev, "found Secondary Mailbox capability (0x%x)\n", offset);
                        break;
                case CXLDEV_CAP_CAP_ID_MEMDEV:
                        dev_dbg(dev, "found Memory Device capability (0x%x)\n", offset);
                        cxlm->memdev_regs = register_block;
                        break;
                default:
                        dev_dbg(dev, "Unknown cap ID: %d (0x%x)\n", cap_id, offset);
                        break;
                }
        }

        if (!cxlm->status_regs || !cxlm->mbox_regs || !cxlm->memdev_regs) {
                dev_err(dev, "registers not found: %s%s%s\n",
                        !cxlm->status_regs ? "status " : "",
                        !cxlm->mbox_regs ? "mbox " : "",
                        !cxlm->memdev_regs ? "memdev" : "");
                return -ENXIO;
        }

        return 0;
}
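
/*
 * Cache the mailbox's maximum payload size, clamped to the 1M ceiling that
 * CXL 2.0 8.2.8.4.3 defines. A payload smaller than 256 bytes cannot carry
 * the mandatory commands, so treat that as fatal.
 */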
static int cxl_mem_setup_mailbox(struct cxl_mem *cxlm)
{
        const int cap = readl(cxlm->mbox_regs + CXLDEV_MBOX_CAPS_OFFSET);

        cxlm->payload_size =
                1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap);

        /*
         * CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register
         *
         * If the size is too small, mandatory commands will not work and so
         * there's no point in going forward. If the size is too large,
         * there's no harm in soft limiting it.
         */
        cxlm->payload_size = min_t(size_t, cxlm->payload_size, SZ_1M);
        if (cxlm->payload_size < 256) {
                dev_err(&cxlm->pdev->dev, "Mailbox is too small (%zub)",
                        cxlm->payload_size);
                return -ENXIO;
        }

        dev_dbg(&cxlm->pdev->dev, "Mailbox payload sized %zu",
                cxlm->payload_size);

        return 0;
}
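
/*
 * Map the memory device register block pointed to by a Register Locator
 * entry. @reg_lo/@reg_hi are the raw DVSEC dwords; the BAR indicator and
 * the offset within that BAR are extracted from them. All allocations and
 * mappings are managed (devm/pcim), so there is no explicit unwind path.
 */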
static struct cxl_mem *cxl_mem_create(struct pci_dev *pdev, u32 reg_lo,
                                      u32 reg_hi)
{
        struct device *dev = &pdev->dev;
        struct cxl_mem *cxlm;
        void __iomem *regs;
        u64 offset;
        u8 bar;
        int rc;

        cxlm = devm_kzalloc(&pdev->dev, sizeof(*cxlm), GFP_KERNEL);
        if (!cxlm) {
                dev_err(dev, "No memory available\n");
                return NULL;
        }

        offset = ((u64)reg_hi << 32) | FIELD_GET(CXL_REGLOC_ADDR_MASK, reg_lo);
        bar = FIELD_GET(CXL_REGLOC_BIR_MASK, reg_lo);

        /* Basic sanity check that BAR is big enough */
        if (pci_resource_len(pdev, bar) < offset) {
                dev_err(dev, "BAR%d: %pr: too small (offset: %#llx)\n", bar,
                        &pdev->resource[bar], (unsigned long long)offset);
                return NULL;
        }

        rc = pcim_iomap_regions(pdev, BIT(bar), pci_name(pdev));
        if (rc) {
                dev_err(dev, "failed to map registers\n");
                return NULL;
        }
        regs = pcim_iomap_table(pdev)[bar];

        mutex_init(&cxlm->mbox_mutex);
        cxlm->pdev = pdev;
        cxlm->regs = regs + offset;

        dev_dbg(dev, "Mapped CXL Memory Device resource\n");
        return cxlm;
}
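
/*
 * Walk the PCIe extended capability list for a CXL Designated Vendor-Specific
 * Extended Capability matching @dvsec. Returns the config space offset of the
 * match, or 0 if none is found.
 */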
static int cxl_mem_dvsec(struct pci_dev *pdev, int dvsec)
{
        int pos;

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DVSEC);
        if (!pos)
                return 0;

        while (pos) {
                u16 vendor, id;

                pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER1, &vendor);
                pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER2, &id);
                if (vendor == PCI_DVSEC_VENDOR_ID_CXL && dvsec == id)
                        return pos;

                pos = pci_find_next_ext_capability(pdev, pos,
                                                   PCI_EXT_CAP_ID_DVSEC);
        }

        return 0;
}

static struct cxl_memdev *to_cxl_memdev(struct device *dev)
{
        return container_of(dev, struct cxl_memdev, dev);
}

static void cxl_memdev_release(struct device *dev)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);

        percpu_ref_exit(&cxlmd->ops_active);
        ida_free(&cxl_memdev_ida, cxlmd->id);
        kfree(cxlmd);
}
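
/* Place device nodes under /dev/cxl/, e.g. /dev/cxl/mem0 */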
static char *cxl_memdev_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
                                kgid_t *gid)
{
        return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
}

static ssize_t firmware_version_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_mem *cxlm = cxlmd->cxlm;

        return sprintf(buf, "%.16s\n", cxlm->firmware_version);
}
static DEVICE_ATTR_RO(firmware_version);

static ssize_t payload_max_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_mem *cxlm = cxlmd->cxlm;

        return sprintf(buf, "%zu\n", cxlm->payload_size);
}
static DEVICE_ATTR_RO(payload_max);

static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_mem *cxlm = cxlmd->cxlm;
        unsigned long long len = range_len(&cxlm->ram_range);

        return sprintf(buf, "%#llx\n", len);
}

static struct device_attribute dev_attr_ram_size =
        __ATTR(size, 0444, ram_size_show, NULL);

static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_mem *cxlm = cxlmd->cxlm;
        unsigned long long len = range_len(&cxlm->pmem_range);

        return sprintf(buf, "%#llx\n", len);
}

static struct device_attribute dev_attr_pmem_size =
        __ATTR(size, 0444, pmem_size_show, NULL);

static struct attribute *cxl_memdev_attributes[] = {
        &dev_attr_firmware_version.attr,
        &dev_attr_payload_max.attr,
        NULL,
};

static struct attribute *cxl_memdev_pmem_attributes[] = {
        &dev_attr_pmem_size.attr,
        NULL,
};

static struct attribute *cxl_memdev_ram_attributes[] = {
        &dev_attr_ram_size.attr,
        NULL,
};

static struct attribute_group cxl_memdev_attribute_group = {
        .attrs = cxl_memdev_attributes,
};

static struct attribute_group cxl_memdev_ram_attribute_group = {
        .name = "ram",
        .attrs = cxl_memdev_ram_attributes,
};

static struct attribute_group cxl_memdev_pmem_attribute_group = {
        .name = "pmem",
        .attrs = cxl_memdev_pmem_attributes,
};

static const struct attribute_group *cxl_memdev_attribute_groups[] = {
        &cxl_memdev_attribute_group,
        &cxl_memdev_ram_attribute_group,
        &cxl_memdev_pmem_attribute_group,
        NULL,
};

static const struct device_type cxl_memdev_type = {
        .name = "cxl_memdev",
        .release = cxl_memdev_release,
        .devnode = cxl_memdev_devnode,
        .groups = cxl_memdev_attribute_groups,
};
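
/*
 * Driver-unbind teardown: kill the percpu ref so new ioctl callers fail,
 * remove the cdev and device, then wait for in-flight ops to drain before
 * dropping the driver data reference.
 */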
static void cxlmdev_unregister(void *_cxlmd)
{
        struct cxl_memdev *cxlmd = _cxlmd;
        struct device *dev = &cxlmd->dev;

        percpu_ref_kill(&cxlmd->ops_active);
        cdev_device_del(&cxlmd->cdev, dev);
        wait_for_completion(&cxlmd->ops_dead);
        cxlmd->cxlm = NULL;
        put_device(dev);
}

static void cxlmdev_ops_active_release(struct percpu_ref *ref)
{
        struct cxl_memdev *cxlmd =
                container_of(ref, typeof(*cxlmd), ops_active);

        complete(&cxlmd->ops_dead);
}
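
/*
 * Allocate a memdev id, initialize the 'memX' device and its char dev, and
 * register both via cdev_device_add(). Unregistration is hooked to the
 * parent PCI device's devres so it runs on driver unbind.
 */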
static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
{
        struct pci_dev *pdev = cxlm->pdev;
        struct cxl_memdev *cxlmd;
        struct device *dev;
        struct cdev *cdev;
        int rc;

        cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
        if (!cxlmd)
                return -ENOMEM;
        init_completion(&cxlmd->ops_dead);

        /*
         * @cxlm is deallocated when the driver unbinds so operations
         * that are using it need to hold a live reference.
         */
        cxlmd->cxlm = cxlm;
        rc = percpu_ref_init(&cxlmd->ops_active, cxlmdev_ops_active_release, 0,
                             GFP_KERNEL);
        if (rc)
                goto err_ref;

        /*
         * ida_alloc_range()'s @max is inclusive; stay within the minor
         * numbers reserved by alloc_chrdev_region() in cxl_mem_init().
         */
        rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS - 1,
                             GFP_KERNEL);
        if (rc < 0)
                goto err_id;
        cxlmd->id = rc;

        dev = &cxlmd->dev;
        device_initialize(dev);
        dev->parent = &pdev->dev;
        dev->bus = &cxl_bus_type;
        dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
        dev->type = &cxl_memdev_type;
        dev_set_name(dev, "mem%d", cxlmd->id);

        cdev = &cxlmd->cdev;
        cdev_init(cdev, &cxl_memdev_fops);

        rc = cdev_device_add(cdev, dev);
        if (rc)
                goto err_add;

        return devm_add_action_or_reset(dev->parent, cxlmdev_unregister, cxlmd);

err_add:
        ida_free(&cxl_memdev_ida, cxlmd->id);
err_id:
        /*
         * Theoretically userspace could have already entered the fops,
         * so flush ops_active.
         */
        percpu_ref_kill(&cxlmd->ops_active);
        wait_for_completion(&cxlmd->ops_dead);
        percpu_ref_exit(&cxlmd->ops_active);
err_ref:
        kfree(cxlmd);
        return rc;
}

/**
 * cxl_mem_identify() - Send the IDENTIFY command to the device.
 * @cxlm: The device to identify.
 *
 * Return: 0 if identify was executed successfully.
 *
 * This will dispatch the identify command to the device and on success
 * populate structures to be exported to sysfs.
 */
static int cxl_mem_identify(struct cxl_mem *cxlm)
{
        /* Output payload of the IDENTIFY command, per the CXL 2.0 spec */
        struct cxl_mbox_identify {
                char fw_revision[0x10];
                __le64 total_capacity;
                __le64 volatile_capacity;
                __le64 persistent_capacity;
                __le64 partition_align;
                __le16 info_event_log_size;
                __le16 warning_event_log_size;
                __le16 failure_event_log_size;
                __le16 fatal_event_log_size;
                __le32 lsa_size;
                u8 poison_list_max_mer[3];
                __le16 inject_poison_limit;
                u8 poison_caps;
                u8 qos_telemetry_caps;
        } __packed id;
        int rc;

        rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id,
                                   sizeof(id));
        if (rc < 0)
                return rc;

        /*
         * TODO: enumerate DPA map, as 'ram' and 'pmem' do not alias.
         * For now, only the capacity is exported in sysfs
         */
        cxlm->ram_range.start = 0;
        cxlm->ram_range.end = le64_to_cpu(id.volatile_capacity) - 1;

        cxlm->pmem_range.start = 0;
        cxlm->pmem_range.end = le64_to_cpu(id.persistent_capacity) - 1;

        memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision));

        return 0;
}
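
/*
 * Probe flow: find the Register Locator DVSEC, walk its register blocks
 * until the Memory Device block is found, map it, then bring up the
 * mailbox, identify the device, and register the memX device.
 */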
static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct device *dev = &pdev->dev;
        struct cxl_mem *cxlm = NULL;
        u32 regloc_size, regblocks;
        int rc, regloc, i;

        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        regloc = cxl_mem_dvsec(pdev, PCI_DVSEC_ID_CXL_REGLOC_OFFSET);
        if (!regloc) {
                dev_err(dev, "register location dvsec not found\n");
                return -ENXIO;
        }

        /* Get the size of the Register Locator DVSEC */
        pci_read_config_dword(pdev, regloc + PCI_DVSEC_HEADER1, &regloc_size);
        regloc_size = FIELD_GET(PCI_DVSEC_HEADER1_LENGTH_MASK, regloc_size);

        regloc += PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET;
        regblocks = (regloc_size - PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET) / 8;

        for (i = 0; i < regblocks; i++, regloc += 8) {
                u32 reg_lo, reg_hi;
                u8 reg_type;

                /* "register low and high" contain other bits */
                pci_read_config_dword(pdev, regloc, &reg_lo);
                pci_read_config_dword(pdev, regloc + 4, &reg_hi);

                reg_type = FIELD_GET(CXL_REGLOC_RBI_MASK, reg_lo);

                if (reg_type == CXL_REGLOC_RBI_MEMDEV) {
                        cxlm = cxl_mem_create(pdev, reg_lo, reg_hi);
                        break;
                }
        }

        if (!cxlm)
                return -ENODEV;

        rc = cxl_mem_setup_regs(cxlm);
        if (rc)
                return rc;

        rc = cxl_mem_setup_mailbox(cxlm);
        if (rc)
                return rc;

        rc = cxl_mem_identify(cxlm);
        if (rc)
                return rc;

        return cxl_mem_add_memdev(cxlm);
}

static const struct pci_device_id cxl_mem_pci_tbl[] = {
        /* PCI class code for CXL.mem Type-3 Devices */
        { PCI_DEVICE_CLASS((PCI_CLASS_MEMORY_CXL << 8 | CXL_MEMORY_PROGIF), ~0)},
        { /* terminate list */ },
};
MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl);

static struct pci_driver cxl_mem_driver = {
        .name = KBUILD_MODNAME,
        .id_table = cxl_mem_pci_tbl,
        .probe = cxl_mem_probe,
        .driver = {
                .probe_type = PROBE_PREFER_ASYNCHRONOUS,
        },
};
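
/*
 * Reserve a dynamic major with enough minors for every possible memdev id,
 * then register the PCI driver; unwind the chrdev region if that fails.
 */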
static __init int cxl_mem_init(void)
{
        dev_t devt;
        int rc;

        rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl");
        if (rc)
                return rc;

        cxl_mem_major = MAJOR(devt);

        rc = pci_register_driver(&cxl_mem_driver);
        if (rc) {
                unregister_chrdev_region(MKDEV(cxl_mem_major, 0),
                                         CXL_MEM_MAX_DEVS);
                return rc;
        }

        return 0;
}

static __exit void cxl_mem_exit(void)
{
        pci_unregister_driver(&cxl_mem_driver);
        unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS);
}

MODULE_LICENSE("GPL v2");
module_init(cxl_mem_init);
module_exit(cxl_mem_exit);
MODULE_IMPORT_NS(CXL);