linux/drivers/pci/iov.c


// SPDX-License-Identifier: GPL-2.0
/*
* PCI Express I/O Virtualization (IOV) support
* Single Root IOV 1.0
* Address Translation Service 1.0
*
* Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
*/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
#include "pci.h"
#define VIRTFN_ID_LEN 17 /* "virtfn%u\0" for 2^32 - 1 */
int pci_iov_virtfn_bus(struct pci_dev *dev, int vf_id)
{
if (!dev->is_physfn)
return -EINVAL;
return dev->bus->number + ((dev->devfn + dev->sriov->offset +
dev->sriov->stride * vf_id) >> 8);
}
int pci_iov_virtfn_devfn(struct pci_dev *dev, int vf_id)
{
if (!dev->is_physfn)
return -EINVAL;
return (dev->devfn + dev->sriov->offset +
dev->sriov->stride * vf_id) & 0xff;
}
EXPORT_SYMBOL_GPL(pci_iov_virtfn_devfn);
int pci_iov_vf_id(struct pci_dev *dev)
{
struct pci_dev *pf;
if (!dev->is_virtfn)
return -EINVAL;
pf = pci_physfn(dev);
return (pci_dev_id(dev) - (pci_dev_id(pf) + pf->sriov->offset)) /
pf->sriov->stride;
}
EXPORT_SYMBOL_GPL(pci_iov_vf_id);
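/*
 * Example (hypothetical, for illustration only; "example_show_vf_addr" is
 * not a kernel symbol): a driver holding a bound VF could combine the
 * helpers above to report the VF's index and routing ID.
 *
 *	static void example_show_vf_addr(struct pci_dev *vf)
 *	{
 *		struct pci_dev *pf = pci_physfn(vf);
 *		int id = pci_iov_vf_id(vf);
 *
 *		if (id < 0)
 *			return;
 *
 *		dev_info(&vf->dev, "VF%d at %02x:%02x.%d\n", id,
 *			 pci_iov_virtfn_bus(pf, id),
 *			 PCI_SLOT(pci_iov_virtfn_devfn(pf, id)),
 *			 PCI_FUNC(pci_iov_virtfn_devfn(pf, id)));
 *	}
 */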
/**
* pci_iov_get_pf_drvdata - Return the drvdata of a PF
* @dev: VF pci_dev
* @pf_driver: Device driver required to own the PF
*
* This must be called from a context that ensures that a VF driver is attached.
* The value returned is invalid once the VF driver completes its remove()
* callback.
*
* Locking is achieved by the driver core. A VF driver cannot be probed until
* pci_enable_sriov() is called and pci_disable_sriov() does not return until
* all VF drivers have completed their remove().
*
* The PF driver must call pci_disable_sriov() before it begins to destroy the
* drvdata.
*/
void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver)
{
struct pci_dev *pf_dev;
if (!dev->is_virtfn)
return ERR_PTR(-EINVAL);
pf_dev = dev->physfn;
if (pf_dev->driver != pf_driver)
return ERR_PTR(-EINVAL);
return pci_get_drvdata(pf_dev);
}
EXPORT_SYMBOL_GPL(pci_iov_get_pf_drvdata);
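/*
 * Example (hypothetical, for illustration only; the "foo_*" names are not
 * real symbols): a VF driver's probe() satisfies the "VF driver attached"
 * requirement above, so it may look up the PF driver's drvdata there.
 *
 *	static int foo_vf_probe(struct pci_dev *vf,
 *				const struct pci_device_id *id)
 *	{
 *		struct foo_pf_priv *pf_priv;
 *
 *		pf_priv = pci_iov_get_pf_drvdata(vf, &foo_pf_driver);
 *		if (IS_ERR(pf_priv))
 *			return PTR_ERR(pf_priv);
 *
 *		pci_set_drvdata(vf, pf_priv);
 *		return 0;
 *	}
 */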
/*
* Per SR-IOV spec sec 3.3.10 and 3.3.11, First VF Offset and VF Stride may
* change when NumVFs changes.
*
* Update iov->offset and iov->stride when NumVFs is written.
*/
static inline void pci_iov_set_numvfs(struct pci_dev *dev, int nr_virtfn)
{
struct pci_sriov *iov = dev->sriov;
pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
}
/*
* The PF consumes one bus number. NumVFs, First VF Offset, and VF Stride
* determine how many additional bus numbers will be consumed by VFs.
*
* Iterate over all valid NumVFs, validate offset and stride, and calculate
* the maximum number of bus numbers that could ever be required.
*/
static int compute_max_vf_buses(struct pci_dev *dev)
{
struct pci_sriov *iov = dev->sriov;
int nr_virtfn, busnr, rc = 0;
for (nr_virtfn = iov->total_VFs; nr_virtfn; nr_virtfn--) {
pci_iov_set_numvfs(dev, nr_virtfn);
if (!iov->offset || (nr_virtfn > 1 && !iov->stride)) {
rc = -EIO;
goto out;
}
busnr = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
if (busnr > iov->max_VF_buses)
iov->max_VF_buses = busnr;
}
out:
pci_iov_set_numvfs(dev, 0);
return rc;
}
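/*
 * Worked example: with the PF at devfn 0x00, First VF Offset 1 and
 * VF Stride 1, NumVFs = 512 puts the last VF (vf_id 511) at routing ID
 * 0x00 + 1 + 1 * 511 = 0x200, so pci_iov_virtfn_bus() returns the PF's
 * bus number + 2 and max_VF_buses must account for two extra bus numbers.
 */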
static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
{
struct pci_bus *child;
if (bus->number == busnr)
return bus;
child = pci_find_bus(pci_domain_nr(bus), busnr);
if (child)
return child;
child = pci_add_new_bus(bus, NULL, busnr);
if (!child)
return NULL;
pci_bus_insert_busn_res(child, busnr, busnr);
return child;
}
static void virtfn_remove_bus(struct pci_bus *physbus, struct pci_bus *virtbus)
{
if (physbus != virtbus && list_empty(&virtbus->devices))
pci_remove_bus(virtbus);
}
resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
{
if (!dev->is_physfn)
return 0;
return dev->sriov->barsz[resno - PCI_IOV_RESOURCES];
}
static void pci_read_vf_config_common(struct pci_dev *virtfn)
{
struct pci_dev *physfn = virtfn->physfn;
/*
* Some config registers are the same across all associated VFs.
* Read them once from VF0 so we can skip reading them from the
* other VFs.
*
* PCIe r4.0, sec 9.3.4.1, technically doesn't require all VFs to
* have the same Revision ID and Subsystem ID, but we assume they
* do.
*/
pci_read_config_dword(virtfn, PCI_CLASS_REVISION,
&physfn->sriov->class);
pci_read_config_byte(virtfn, PCI_HEADER_TYPE,
&physfn->sriov->hdr_type);
pci_read_config_word(virtfn, PCI_SUBSYSTEM_VENDOR_ID,
&physfn->sriov->subsystem_vendor);
pci_read_config_word(virtfn, PCI_SUBSYSTEM_ID,
&physfn->sriov->subsystem_device);
}
int pci_iov_sysfs_link(struct pci_dev *dev,
struct pci_dev *virtfn, int id)
{
char buf[VIRTFN_ID_LEN];
int rc;
sprintf(buf, "virtfn%u", id);
rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
if (rc)
goto failed;
rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
if (rc)
goto failed1;
kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
return 0;
failed1:
sysfs_remove_link(&dev->dev.kobj, buf);
failed:
return rc;
}
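/*
 * The resulting sysfs topology looks roughly like:
 *
 *	/sys/bus/pci/devices/<PF>/virtfn<id>  ->  ../<VF>
 *	/sys/bus/pci/devices/<VF>/physfn      ->  ../<PF>
 */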
#ifdef CONFIG_PCI_MSI
static ssize_t sriov_vf_total_msix_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
u32 vf_total_msix = 0;
device_lock(dev);
if (!pdev->driver || !pdev->driver->sriov_get_vf_total_msix)
goto unlock;
vf_total_msix = pdev->driver->sriov_get_vf_total_msix(pdev);
unlock:
device_unlock(dev);
return sysfs_emit(buf, "%u\n", vf_total_msix);
}
static DEVICE_ATTR_RO(sriov_vf_total_msix);
static ssize_t sriov_vf_msix_count_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct pci_dev *vf_dev = to_pci_dev(dev);
struct pci_dev *pdev = pci_physfn(vf_dev);
int val, ret = 0;
if (kstrtoint(buf, 0, &val) < 0)
return -EINVAL;
if (val < 0)
return -EINVAL;
device_lock(&pdev->dev);
if (!pdev->driver || !pdev->driver->sriov_set_msix_vec_count) {
ret = -EOPNOTSUPP;
goto err_pdev;
}
device_lock(&vf_dev->dev);
if (vf_dev->driver) {
/*
* A driver is already attached to this VF and has configured
* itself based on the current MSI-X vector count. Changing
* the vector size could mess up the driver, so block it.
*/
ret = -EBUSY;
goto err_dev;
}
ret = pdev->driver->sriov_set_msix_vec_count(vf_dev, val);
err_dev:
device_unlock(&vf_dev->dev);
err_pdev:
device_unlock(&pdev->dev);
return ret ? : count;
}
static DEVICE_ATTR_WO(sriov_vf_msix_count);
#endif
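/*
 * Example (hypothetical, for illustration only; the "foo_*" names are not
 * real symbols): a PF driver opts into the two attributes above by
 * implementing the corresponding struct pci_driver callbacks.
 *
 *	static int foo_set_msix_vec_count(struct pci_dev *vf, int count)
 *	{
 *		return foo_fw_set_vf_msix(pci_physfn(vf), pci_iov_vf_id(vf),
 *					  count);
 *	}
 *
 *	static u32 foo_get_vf_total_msix(struct pci_dev *pf)
 *	{
 *		return foo_fw_query_msix_pool_size(pf);
 *	}
 *
 *	static struct pci_driver foo_pf_driver = {
 *		...
 *		.sriov_configure	  = foo_sriov_configure,
 *		.sriov_set_msix_vec_count = foo_set_msix_vec_count,
 *		.sriov_get_vf_total_msix  = foo_get_vf_total_msix,
 *	};
 */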
static struct attribute *sriov_vf_dev_attrs[] = {
#ifdef CONFIG_PCI_MSI
&dev_attr_sriov_vf_msix_count.attr,
#endif
NULL,
};
static umode_t sriov_vf_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = kobj_to_dev(kobj);
struct pci_dev *pdev = to_pci_dev(dev);
if (!pdev->is_virtfn)
return 0;
return a->mode;
}
const struct attribute_group sriov_vf_dev_attr_group = {
.attrs = sriov_vf_dev_attrs,
.is_visible = sriov_vf_attrs_are_visible,
};
int pci_iov_add_virtfn(struct pci_dev *dev, int id)
{
int i;
int rc = -ENOMEM;
u64 size;
struct pci_dev *virtfn;
struct resource *res;
struct pci_sriov *iov = dev->sriov;
struct pci_bus *bus;
bus = virtfn_add_bus(dev->bus, pci_iov_virtfn_bus(dev, id));
if (!bus)
goto failed;
virtfn = pci_alloc_dev(bus);
if (!virtfn)
goto failed0;
virtfn->devfn = pci_iov_virtfn_devfn(dev, id);
virtfn->vendor = dev->vendor;
virtfn->device = iov->vf_device;
virtfn->is_virtfn = 1;
virtfn->physfn = pci_dev_get(dev);
virtfn->no_command_memory = 1;
if (id == 0)
pci_read_vf_config_common(virtfn);
rc = pci_setup_device(virtfn);
if (rc)
goto failed1;
virtfn->dev.parent = dev->dev.parent;
virtfn->multifunction = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
res = &dev->resource[i + PCI_IOV_RESOURCES];
if (!res->parent)
continue;
virtfn->resource[i].name = pci_name(virtfn);
virtfn->resource[i].flags = res->flags;
size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
virtfn->resource[i].start = res->start + size * id;
virtfn->resource[i].end = virtfn->resource[i].start + size - 1;
rc = request_resource(res, &virtfn->resource[i]);
BUG_ON(rc);
}
pci_device_add(virtfn, virtfn->bus);
rc = pci_iov_sysfs_link(dev, virtfn, id);
if (rc)
goto failed1;
pci_bus_add_device(virtfn);
return 0;
failed1:
pci_stop_and_remove_bus_device(virtfn);
pci_dev_put(dev);
failed0:
virtfn_remove_bus(dev->bus, bus);
failed:
return rc;
}
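/*
 * Note on the BAR arithmetic above: each VF BAR is a fixed-size slice of
 * the PF's corresponding IOV resource.  For example, with a 16 KB VF BAR0,
 * VF 3 is assigned [res->start + 3 * 16K, res->start + 4 * 16K - 1].
 */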
void pci_iov_remove_virtfn(struct pci_dev *dev, int id)
{
char buf[VIRTFN_ID_LEN];
struct pci_dev *virtfn;
virtfn = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus),
pci_iov_virtfn_bus(dev, id),
pci_iov_virtfn_devfn(dev, id));
if (!virtfn)
return;
sprintf(buf, "virtfn%u", id);
sysfs_remove_link(&dev->dev.kobj, buf);
/*
* pci_stop_dev() could have been called for this virtfn already,
* so the directory for the virtfn may have been removed before.
* Double check to avoid spurious sysfs warnings.
*/
if (virtfn->dev.kobj.sd)
sysfs_remove_link(&virtfn->dev.kobj, "physfn");
pci_stop_and_remove_bus_device(virtfn);
virtfn_remove_bus(dev->bus, virtfn->bus);
/* balance pci_get_domain_bus_and_slot() */
pci_dev_put(virtfn);
pci_dev_put(dev);
}
static ssize_t sriov_totalvfs_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
return sysfs_emit(buf, "%u\n", pci_sriov_get_totalvfs(pdev));
}
static ssize_t sriov_numvfs_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
u16 num_vfs;
/* Serialize vs sriov_numvfs_store() so readers see valid num_VFs */
device_lock(&pdev->dev);
num_vfs = pdev->sriov->num_VFs;
device_unlock(&pdev->dev);
return sysfs_emit(buf, "%u\n", num_vfs);
}
/*
* num_vfs > 0; number of VFs to enable
* num_vfs = 0; disable all VFs
*
* Note: The SR-IOV spec does not allow partial VF
* disable, so it's all or none.
*/
static ssize_t sriov_numvfs_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
int ret = 0;
u16 num_vfs;
if (kstrtou16(buf, 0, &num_vfs) < 0)
return -EINVAL;
if (num_vfs > pci_sriov_get_totalvfs(pdev))
return -ERANGE;
device_lock(&pdev->dev);
if (num_vfs == pdev->sriov->num_VFs)
goto exit;
/* is PF driver loaded */
if (!pdev->driver) {
pci_info(pdev, "no driver bound to device; cannot configure SR-IOV\n");
ret = -ENOENT;
goto exit;
}
/* is PF driver loaded w/callback */
if (!pdev->driver->sriov_configure) {
pci_info(pdev, "driver does not support SR-IOV configuration via sysfs\n");
ret = -ENOENT;
goto exit;
}
if (num_vfs == 0) {
/* disable VFs */
ret = pdev->driver->sriov_configure(pdev, 0);
goto exit;
}
/* enable VFs */
if (pdev->sriov->num_VFs) {
pci_warn(pdev, "%d VFs already enabled. Disable before enabling %d VFs\n",
pdev->sriov->num_VFs, num_vfs);
ret = -EBUSY;
goto exit;
}
ret = pdev->driver->sriov_configure(pdev, num_vfs);
if (ret < 0)
goto exit;
if (ret != num_vfs)
pci_warn(pdev, "%d VFs requested; only %d enabled\n",
num_vfs, ret);
exit:
device_unlock(&pdev->dev);
if (ret < 0)
return ret;
return count;
}
static ssize_t sriov_offset_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
return sysfs_emit(buf, "%u\n", pdev->sriov->offset);
}
static ssize_t sriov_stride_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
return sysfs_emit(buf, "%u\n", pdev->sriov->stride);
}
static ssize_t sriov_vf_device_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
return sysfs_emit(buf, "%x\n", pdev->sriov->vf_device);
}
static ssize_t sriov_drivers_autoprobe_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
return sysfs_emit(buf, "%u\n", pdev->sriov->drivers_autoprobe);
}
static ssize_t sriov_drivers_autoprobe_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
bool drivers_autoprobe;
if (kstrtobool(buf, &drivers_autoprobe) < 0)
return -EINVAL;
pdev->sriov->drivers_autoprobe = drivers_autoprobe;
return count;
}
static DEVICE_ATTR_RO(sriov_totalvfs);
static DEVICE_ATTR_RW(sriov_numvfs);
static DEVICE_ATTR_RO(sriov_offset);
static DEVICE_ATTR_RO(sriov_stride);
static DEVICE_ATTR_RO(sriov_vf_device);
static DEVICE_ATTR_RW(sriov_drivers_autoprobe);
static struct attribute *sriov_pf_dev_attrs[] = {
&dev_attr_sriov_totalvfs.attr,
&dev_attr_sriov_numvfs.attr,
&dev_attr_sriov_offset.attr,
&dev_attr_sriov_stride.attr,
&dev_attr_sriov_vf_device.attr,
&dev_attr_sriov_drivers_autoprobe.attr,
#ifdef CONFIG_PCI_MSI
&dev_attr_sriov_vf_total_msix.attr,
#endif
NULL,
};
static umode_t sriov_pf_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = kobj_to_dev(kobj);
if (!dev_is_pf(dev))
return 0;
return a->mode;
}
const struct attribute_group sriov_pf_dev_attr_group = {
.attrs = sriov_pf_dev_attrs,
.is_visible = sriov_pf_attrs_are_visible,
};
int __weak pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
return 0;
}
int __weak pcibios_sriov_disable(struct pci_dev *pdev)
{
return 0;
}
static int sriov_add_vfs(struct pci_dev *dev, u16 num_vfs)
{
unsigned int i;
int rc;
if (dev->no_vf_scan)
return 0;
for (i = 0; i < num_vfs; i++) {
rc = pci_iov_add_virtfn(dev, i);
if (rc)
goto failed;
}
return 0;
failed:
while (i--)
pci_iov_remove_virtfn(dev, i);
return rc;
}
static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
{
int rc;
int i;
int nres;
u16 initial;
struct resource *res;
struct pci_dev *pdev;
struct pci_sriov *iov = dev->sriov;
int bars = 0;
int bus;
if (!nr_virtfn)
return 0;
if (iov->num_VFs)
return -EINVAL;
pci_read_config_word(dev, iov->pos + PCI_SRIOV_INITIAL_VF, &initial);
if (initial > iov->total_VFs ||
(!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total_VFs)))
return -EIO;
if (nr_virtfn < 0 || nr_virtfn > iov->total_VFs ||
(!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
return -EINVAL;
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
bars |= (1 << (i + PCI_IOV_RESOURCES));
res = &dev->resource[i + PCI_IOV_RESOURCES];
if (res->parent)
nres++;
}
if (nres != iov->nres) {
pci_err(dev, "not enough MMIO resources for SR-IOV\n");
return -ENOMEM;
}
bus = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
if (bus > dev->bus->busn_res.end) {
pci_err(dev, "can't enable %d VFs (bus %02x out of range of %pR)\n",
nr_virtfn, bus, &dev->bus->busn_res);
return -ENOMEM;
}
if (pci_enable_resources(dev, bars)) {
pci_err(dev, "SR-IOV: IOV BARS not allocated\n");
return -ENOMEM;
}
if (iov->link != dev->devfn) {
pdev = pci_get_slot(dev->bus, iov->link);
if (!pdev)
return -ENODEV;
if (!pdev->is_physfn) {
pci_dev_put(pdev);
return -ENOSYS;
}
rc = sysfs_create_link(&dev->dev.kobj,
&pdev->dev.kobj, "dep_link");
pci_dev_put(pdev);
if (rc)
return rc;
}
iov->initial_VFs = initial;
if (nr_virtfn < initial)
initial = nr_virtfn;
rc = pcibios_sriov_enable(dev, initial);
if (rc) {
pci_err(dev, "failure %d from pcibios_sriov_enable()\n", rc);
goto err_pcibios;
}
pci_iov_set_numvfs(dev, nr_virtfn);
iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
pci_cfg_access_lock(dev);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
msleep(100);
pci_cfg_access_unlock(dev);
rc = sriov_add_vfs(dev, initial);
if (rc)
goto err_pcibios;
kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
iov->num_VFs = nr_virtfn;
return 0;
err_pcibios:
iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
pci_cfg_access_lock(dev);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
ssleep(1);
pci_cfg_access_unlock(dev);
pcibios_sriov_disable(dev);
if (iov->link != dev->devfn)
sysfs_remove_link(&dev->dev.kobj, "dep_link");
pci_iov_set_numvfs(dev, 0);
return rc;
}
static void sriov_del_vfs(struct pci_dev *dev)
{
struct pci_sriov *iov = dev->sriov;
int i;
for (i = 0; i < iov->num_VFs; i++)
pci_iov_remove_virtfn(dev, i);
}
static void sriov_disable(struct pci_dev *dev)
{
struct pci_sriov *iov = dev->sriov;
if (!iov->num_VFs)
return;
sriov_del_vfs(dev);
iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
pci_cfg_access_lock(dev);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
ssleep(1);
pci_cfg_access_unlock(dev);
pcibios_sriov_disable(dev);
if (iov->link != dev->devfn)
sysfs_remove_link(&dev->dev.kobj, "dep_link");
iov->num_VFs = 0;
pci_iov_set_numvfs(dev, 0);
}
static int sriov_init(struct pci_dev *dev, int pos)
{
int i, bar64;
int rc;
int nres;
u32 pgsz;
u16 ctrl, total;
struct pci_sriov *iov;
struct resource *res;
const char *res_name;
struct pci_dev *pdev;
pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
if (ctrl & PCI_SRIOV_CTRL_VFE) {
pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, 0);
ssleep(1);
}
ctrl = 0;
list_for_each_entry(pdev, &dev->bus->devices, bus_list)
if (pdev->is_physfn)
goto found;
pdev = NULL;
if (pci_ari_enabled(dev->bus))
ctrl |= PCI_SRIOV_CTRL_ARI;
found:
pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
if (!total)
return 0;
pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz);
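/*
 * Bit n of Supported Page Sizes corresponds to a page size of 2^(n + 12),
 * so bits below PAGE_SHIFT - 12 describe page sizes smaller than the host
 * page size.  Mask those off, then keep only the lowest remaining bit so
 * the smallest supported page size that is at least PAGE_SIZE is
 * programmed as the System Page Size.
 */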
i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0;
pgsz &= ~((1 << i) - 1);
if (!pgsz)
return -EIO;
pgsz &= ~(pgsz - 1);
pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);
iov = kzalloc(sizeof(*iov), GFP_KERNEL);
if (!iov)
return -ENOMEM;
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
res = &dev->resource[i + PCI_IOV_RESOURCES];
res_name = pci_resource_name(dev, i + PCI_IOV_RESOURCES);
/*
* If it is already FIXED, don't change it, something
* (perhaps EA or header fixups) wants it this way.
*/
if (res->flags & IORESOURCE_PCI_FIXED)
bar64 = (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
else
bar64 = __pci_read_base(dev, pci_bar_unknown, res,
pos + PCI_SRIOV_BAR + i * 4);
if (!res->flags)
continue;
if (resource_size(res) & (PAGE_SIZE - 1)) {
rc = -EIO;
goto failed;
}
iov->barsz[i] = resource_size(res);
res->end = res->start + resource_size(res) * total - 1;
pci_info(dev, "%s %pR: contains BAR %d for %d VFs\n",
res_name, res, i, total);
i += bar64;
nres++;
}
iov->pos = pos;
iov->nres = nres;
iov->ctrl = ctrl;
iov->total_VFs = total;
iov->driver_max_VFs = total;
pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &iov->vf_device);
iov->pgsz = pgsz;
iov->self = dev;
iov->drivers_autoprobe = true;
pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)
iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);
if (pdev)
iov->dev = pci_dev_get(pdev);
else
iov->dev = dev;
dev->sriov = iov;
dev->is_physfn = 1;
rc = compute_max_vf_buses(dev);
if (rc)
goto fail_max_buses;
return 0;
fail_max_buses:
dev->sriov = NULL;
dev->is_physfn = 0;
failed:
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
res = &dev->resource[i + PCI_IOV_RESOURCES];
res->flags = 0;
}
kfree(iov);
return rc;
}
static void sriov_release(struct pci_dev *dev)
{
BUG_ON(dev->sriov->num_VFs);
if (dev != dev->sriov->dev)
pci_dev_put(dev->sriov->dev);
kfree(dev->sriov);
dev->sriov = NULL;
}
static void sriov_restore_state(struct pci_dev *dev)
{
int i;
u16 ctrl;
struct pci_sriov *iov = dev->sriov;
pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &ctrl);
if (ctrl & PCI_SRIOV_CTRL_VFE)
return;
/*
* Restore PCI_SRIOV_CTRL_ARI before pci_iov_set_numvfs() because
* it reads offset & stride, which depend on PCI_SRIOV_CTRL_ARI.
*/
ctrl &= ~PCI_SRIOV_CTRL_ARI;
ctrl |= iov->ctrl & PCI_SRIOV_CTRL_ARI;
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, ctrl);
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
pci_update_resource(dev, i + PCI_IOV_RESOURCES);
pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
pci_iov_set_numvfs(dev, iov->num_VFs);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
msleep(100);
}
/**
* pci_iov_init - initialize the IOV capability
* @dev: the PCI device
*
* Returns 0 on success, or negative on failure.
*/
int pci_iov_init(struct pci_dev *dev)
{
int pos;
if (!pci_is_pcie(dev))
return -ENODEV;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
if (pos)
return sriov_init(dev, pos);
return -ENODEV;
}
/**
* pci_iov_release - release resources used by the IOV capability
* @dev: the PCI device
*/
void pci_iov_release(struct pci_dev *dev)
{
if (dev->is_physfn)
sriov_release(dev);
}
/**
* pci_iov_remove - clean up SR-IOV state after PF driver is detached
* @dev: the PCI device
*/
void pci_iov_remove(struct pci_dev *dev)
{
struct pci_sriov *iov = dev->sriov;
if (!dev->is_physfn)
return;
iov->driver_max_VFs = iov->total_VFs;
if (iov->num_VFs)
pci_warn(dev, "driver left SR-IOV enabled after remove\n");
}
/**
* pci_iov_update_resource - update a VF BAR
* @dev: the PCI device
* @resno: the resource number
*
* Update a VF BAR in the SR-IOV capability of a PF.
*/
void pci_iov_update_resource(struct pci_dev *dev, int resno)
{
struct pci_sriov *iov = dev->is_physfn ? dev->sriov : NULL;
struct resource *res = dev->resource + resno;
int vf_bar = resno - PCI_IOV_RESOURCES;
struct pci_bus_region region;
u16 cmd;
u32 new;
int reg;
/*
* The generic pci_restore_bars() path calls this for all devices,
* including VFs and non-SR-IOV devices. If this is not a PF, we
* have nothing to do.
*/
if (!iov)
return;
pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &cmd);
if ((cmd & PCI_SRIOV_CTRL_VFE) && (cmd & PCI_SRIOV_CTRL_MSE)) {
dev_WARN(&dev->dev, "can't update enabled VF BAR%d %pR\n",
vf_bar, res);
return;
}
/*
* Ignore unimplemented BARs, unused resource slots for 64-bit
* BARs, and non-movable resources, e.g., those described via
* Enhanced Allocation.
*/
if (!res->flags)
return;
if (res->flags & IORESOURCE_UNSET)
return;
if (res->flags & IORESOURCE_PCI_FIXED)
return;
pcibios_resource_to_bus(dev->bus, &region, res);
new = region.start;
new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
reg = iov->pos + PCI_SRIOV_BAR + 4 * vf_bar;
pci_write_config_dword(dev, reg, new);
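/*
 * For a 64-bit BAR, write the high dword as "region.start >> 16 >> 16"
 * rather than ">> 32" so the shift stays well-defined even when
 * resource_size_t is a 32-bit type.
 */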
if (res->flags & IORESOURCE_MEM_64) {
new = region.start >> 16 >> 16;
pci_write_config_dword(dev, reg + 4, new);
}
}
resource_size_t __weak pcibios_iov_resource_alignment(struct pci_dev *dev,
int resno)
{
return pci_iov_resource_size(dev, resno);
}
/**
* pci_sriov_resource_alignment - get resource alignment for VF BAR
* @dev: the PCI device
* @resno: the resource number
*
* Returns the alignment of the VF BAR found in the SR-IOV capability.
* This is not the same as the resource size which is defined as
* the VF BAR size multiplied by the number of VFs. The alignment
* is just the VF BAR size.
*/
resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
{
return pcibios_iov_resource_alignment(dev, resno);
}
/**
* pci_restore_iov_state - restore the state of the IOV capability
* @dev: the PCI device
*/
void pci_restore_iov_state(struct pci_dev *dev)
{
if (dev->is_physfn)
sriov_restore_state(dev);
}
/**
* pci_vf_drivers_autoprobe - set PF property drivers_autoprobe for VFs
* @dev: the PCI device
* @auto_probe: set VF drivers auto probe flag
*/
void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool auto_probe)
{
if (dev->is_physfn)
dev->sriov->drivers_autoprobe = auto_probe;
}
/**
 * pci_iov_bus_range - find bus range used by Virtual Functions
* @bus: the PCI bus
*
 * Returns the maximum number of buses (excluding the current one) used by
 * Virtual Functions.
*/
int pci_iov_bus_range(struct pci_bus *bus)
{
int max = 0;
struct pci_dev *dev;
list_for_each_entry(dev, &bus->devices, bus_list) {
if (!dev->is_physfn)
continue;
if (dev->sriov->max_VF_buses > max)
max = dev->sriov->max_VF_buses;
}
return max ? max - bus->number : 0;
}
/**
* pci_enable_sriov - enable the SR-IOV capability
* @dev: the PCI device
* @nr_virtfn: number of virtual functions to enable
*
* Returns 0 on success, or negative on failure.
*/
int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{
might_sleep();
if (!dev->is_physfn)
return -ENOSYS;
return sriov_enable(dev, nr_virtfn);
}
EXPORT_SYMBOL_GPL(pci_enable_sriov);
/**
* pci_disable_sriov - disable the SR-IOV capability
* @dev: the PCI device
*/
void pci_disable_sriov(struct pci_dev *dev)
{
might_sleep();
if (!dev->is_physfn)
return;
sriov_disable(dev);
}
EXPORT_SYMBOL_GPL(pci_disable_sriov);
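/*
 * Usage sketch (illustrative only; the foo_* names are hypothetical):
 * a PF driver's ->sriov_configure() callback typically wraps
 * pci_enable_sriov() and pci_disable_sriov() like this, returning the
 * number of VFs actually enabled on success.
 *
 *	static int foo_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
 *	{
 *		int err;
 *
 *		if (nr_virtfn == 0) {
 *			pci_disable_sriov(pdev);
 *			return 0;
 *		}
 *
 *		err = pci_enable_sriov(pdev, nr_virtfn);
 *		if (err)
 *			return err;
 *
 *		return nr_virtfn;
 *	}
 */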
/**
 * pci_num_vf - return number of VFs associated with a PF
* @dev: the PCI device
*
* Returns number of VFs, or 0 if SR-IOV is not enabled.
*/
int pci_num_vf(struct pci_dev *dev)
{
if (!dev->is_physfn)
return 0;
return dev->sriov->num_VFs;
}
EXPORT_SYMBOL_GPL(pci_num_vf);
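/*
 * Usage sketch (illustrative only; struct foo_vf_info is hypothetical):
 * a PF driver can size its per-VF bookkeeping from the number of VFs
 * currently enabled.
 *
 *	struct foo_vf_info *vfinfo;
 *	int num_vfs;
 *
 *	num_vfs = pci_num_vf(pdev);
 *	vfinfo = kcalloc(num_vfs, sizeof(*vfinfo), GFP_KERNEL);
 *	if (!vfinfo)
 *		return -ENOMEM;
 */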
/**
 * pci_vfs_assigned - returns the number of VFs assigned to a guest
* @dev: the PCI device
*
 * Returns the number of VFs belonging to this device that are assigned to
 * a guest.  If the device is not a physical function, returns 0.
*/
int pci_vfs_assigned(struct pci_dev *dev)
{
struct pci_dev *vfdev;
unsigned int vfs_assigned = 0;
unsigned short dev_id;
/* only search if we are a PF */
if (!dev->is_physfn)
return 0;
/*
 * Determine the device ID for the VFs; the vendor ID is the same as
 * the PF's, so there is no need to check it.
*/
dev_id = dev->sriov->vf_device;
/* loop through all the VFs to see if we own any that are assigned */
vfdev = pci_get_device(dev->vendor, dev_id, NULL);
while (vfdev) {
/*
* It is considered assigned if it is a virtual function with
* our dev as the physical function and the assigned bit is set
*/
if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
pci_is_dev_assigned(vfdev))
vfs_assigned++;
vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
}
return vfs_assigned;
}
EXPORT_SYMBOL_GPL(pci_vfs_assigned);
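/*
 * Usage sketch (illustrative only; the foo_* name is hypothetical):
 * a PF driver commonly refuses to tear down SR-IOV while any of its
 * VFs is still assigned to a guest.
 *
 *	static int foo_disable_sriov(struct pci_dev *pdev)
 *	{
 *		if (pci_vfs_assigned(pdev)) {
 *			dev_warn(&pdev->dev,
 *				 "VFs are assigned - not disabling SR-IOV\n");
 *			return -EBUSY;
 *		}
 *
 *		pci_disable_sriov(pdev);
 *		return 0;
 *	}
 */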
/**
 * pci_sriov_set_totalvfs - reduce the TotalVFs available
* @dev: the PCI PF device
* @numvfs: number that should be used for TotalVFs supported
*
* Should be called from PF driver's probe routine with
* device's mutex held.
*
 * Returns 0 if @dev is an SR-IOV-capable PF and @numvfs is valid.
 * Returns -ENOSYS if @dev is not a PF, -EINVAL if @numvfs is invalid,
 * and -EBUSY if VFs are already enabled.
*/
int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
{
if (!dev->is_physfn)
return -ENOSYS;
if (numvfs > dev->sriov->total_VFs)
return -EINVAL;
/* Shouldn't change if VFs already enabled */
if (dev->sriov->ctrl & PCI_SRIOV_CTRL_VFE)
return -EBUSY;
dev->sriov->driver_max_VFs = numvfs;
return 0;
}
EXPORT_SYMBOL_GPL(pci_sriov_set_totalvfs);
/**
 * pci_sriov_get_totalvfs - get total VFs supported on this device
* @dev: the PCI PF device
*
 * For a PCIe device with SR-IOV support, return the PCIe SR-IOV
 * capability value of TotalVFs, or the value of driver_max_VFs if the
 * driver reduced it.  Otherwise 0.
*/
int pci_sriov_get_totalvfs(struct pci_dev *dev)
{
if (!dev->is_physfn)
return 0;
return dev->sriov->driver_max_VFs;
}
EXPORT_SYMBOL_GPL(pci_sriov_get_totalvfs);
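/*
 * Usage sketch (illustrative only; FOO_MAX_VFS is hypothetical): a PF
 * driver may cap TotalVFs from its probe routine, before user space
 * enables SR-IOV through sysfs, and read the effective limit back with
 * pci_sriov_get_totalvfs().
 *
 *	err = pci_sriov_set_totalvfs(pdev, FOO_MAX_VFS);
 *	if (err)
 *		return err;
 *
 *	dev_info(&pdev->dev, "up to %d VFs supported\n",
 *		 pci_sriov_get_totalvfs(pdev));
 */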
/**
* pci_sriov_configure_simple - helper to configure SR-IOV
* @dev: the PCI device
* @nr_virtfn: number of virtual functions to enable, 0 to disable
*
* Enable or disable SR-IOV for devices that don't require any PF setup
* before enabling SR-IOV. Return value is negative on error, or number of
* VFs allocated on success.
*/
int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn)
{
int rc;
might_sleep();
if (!dev->is_physfn)
return -ENODEV;
if (pci_vfs_assigned(dev)) {
pci_warn(dev, "Cannot modify SR-IOV while VFs are assigned\n");
return -EPERM;
}
if (nr_virtfn == 0) {
sriov_disable(dev);
return 0;
}
rc = sriov_enable(dev, nr_virtfn);
if (rc < 0)
return rc;
return nr_virtfn;
}
EXPORT_SYMBOL_GPL(pci_sriov_configure_simple);
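/*
 * Usage sketch (illustrative only; the foo_* names are hypothetical):
 * a PF driver that needs no PF-side setup can plug this helper straight
 * into its ->sriov_configure() callback.
 *
 *	static struct pci_driver foo_driver = {
 *		.name		 = "foo",
 *		.id_table	 = foo_id_table,
 *		.probe		 = foo_probe,
 *		.remove		 = foo_remove,
 *		.sriov_configure = pci_sriov_configure_simple,
 *	};
 */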