Merge branch 'nfp-get_phys_port_name-for-representors-and-SR-IOV-reorder'
Jakub Kicinski says:

====================
nfp: get_phys_port_name for representors and SR-IOV reorder

This series starts by making the error message if FW cannot be located easier to understand. Then I move some functions from PCI probe files into library code (nfpcore) where they belong, and remove one function which is never used.

The next few patches equip representors with an nfp_port structure and make their NDOs fully shared (not defined in apps), thanks to which we can easily determine which netdevs are NFP's by comparing the NDO pointers. The 10th patch makes use of the shared NDOs and nfp_ports to deliver a netdev-type independent .ndo_get_phys_port_name() implementation.

Patches 11 and 12 reorder the nfp_app SR-IOV callbacks with enabling SR-IOV VFs. Unfortunately, due to how the PCI subsystem works we can't guarantee being able to disable SR-IOV at exit or that it will be disabled when we first probe... We must therefore make sure FW is able to deal with being loaded while SR-IOV is already on.

Patch 13 fixes a potential deadlock when enabling SR-IOV happens at the same time as a port state refresh. Note that this can't happen at this point, since Flower doesn't refresh ports... but lockdep doesn't know about such details and we will have to deal with this sooner or later anyway.

Last but not least a new Kconfig option is added to make sure those who don't care about flower offloads have a way of not including the code in their kernels. Thanks to the nfp_app separation this costs us a single ifdef and excluding flower files from the build.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 2ee87db3a2
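For orientation before the diff: the core of the series is that all representor netdevs now share one net_device_ops table and carry a struct nfp_port, so identifying an NFP representor becomes a pointer comparison and the open/stop NDOs simply dispatch into per-app callbacks. Below is a condensed sketch of that pattern, lifted from the hunks that follow (nfp_repr_netdev_ops, nfp_netdev_is_nfp_repr(), nfp_repr_open()); it is illustrative only and omits the surrounding declarations and error handling of the real driver code.

/* Shared NDO table makes "is this netdev one of ours?" a pointer compare. */
extern const struct net_device_ops nfp_repr_netdev_ops;

static inline bool nfp_netdev_is_nfp_repr(struct net_device *netdev)
{
	return netdev->netdev_ops == &nfp_repr_netdev_ops;
}

/* .ndo_open of every representor: defer to the app's repr_open callback. */
static int nfp_repr_open(struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	return nfp_app_repr_open(repr->app, repr);
}

With the nfp_port attached, the shared .ndo_get_phys_port_name() can then report "p%d"/"p%ds%d" for physical ports and "pf%d"/"pf%dvf%d" for PF and VF representors, as implemented in the nfp_port.c hunk below.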
@@ -25,6 +25,16 @@ config NFP
cards working as a advanced Ethernet NIC. It works with both
SR-IOV physical and virtual functions.

config NFP_APP_FLOWER
bool "NFP4000/NFP6000 TC Flower offload support"
depends on NFP
depends on NET_SWITCHDEV
---help---
Enable driver support for TC Flower offload on NFP4000 and NFP6000.
Say Y, if you are planning to make use of TC Flower offload
either directly, with Open vSwitch, or any other way. Note that
TC Flower offload requires specific FW to work.

config NFP_DEBUG
bool "Debug support for Netronome(R) NFP4000/NFP6000 NIC drivers"
depends on NFP
@@ -27,10 +27,14 @@ nfp-objs := \
nfp_port.o \
bpf/main.o \
bpf/offload.o \
flower/cmsg.o \
flower/main.o \
nic/main.o

ifeq ($(CONFIG_NFP_APP_FLOWER),y)
nfp-objs += \
flower/cmsg.o \
flower/main.o
endif

ifeq ($(CONFIG_BPF_SYSCALL),y)
nfp-objs += \
bpf/verifier.o \
@@ -79,9 +79,8 @@ nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size,
return skb;
}

int nfp_flower_cmsg_portmod(struct net_device *netdev, bool carrier_ok)
int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok)
{
struct nfp_repr *repr = netdev_priv(netdev);
struct nfp_flower_cmsg_portmod *msg;
struct sk_buff *skb;

@@ -94,7 +93,7 @@ int nfp_flower_cmsg_portmod(struct net_device *netdev, bool carrier_ok)
msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id);
msg->reserved = 0;
msg->info = carrier_ok;
msg->mtu = cpu_to_be16(netdev->mtu);
msg->mtu = cpu_to_be16(repr->netdev->mtu);

nfp_ctrl_tx(repr->app->ctrl, skb);

@@ -110,7 +110,7 @@ nfp_flower_cmsg_pcie_port(u8 nfp_pcie, enum nfp_flower_cmsg_port_vnic_type type,
NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT);
}

int nfp_flower_cmsg_portmod(struct net_device *netdev, bool carrier_ok);
int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok);
void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb);

#endif
@@ -104,51 +104,30 @@ nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
return reprs->reprs[port];
}

static void
nfp_flower_repr_netdev_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct nfp_repr *repr = netdev_priv(netdev);
enum nfp_repr_type type;
u32 port_id;
u8 port = 0;

port_id = repr->dst->u.port_info.port_id;
type = nfp_flower_repr_get_type_and_port(repr->app, port_id, &port);
nfp_repr_get_stats64(repr->app, type, port, stats);
}

static int nfp_flower_repr_netdev_open(struct net_device *netdev)
static int
nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
{
int err;

err = nfp_flower_cmsg_portmod(netdev, true);
err = nfp_flower_cmsg_portmod(repr, true);
if (err)
return err;

netif_carrier_on(netdev);
netif_tx_wake_all_queues(netdev);
netif_carrier_on(repr->netdev);
netif_tx_wake_all_queues(repr->netdev);

return 0;
}

static int nfp_flower_repr_netdev_stop(struct net_device *netdev)
static int
nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
{
netif_carrier_off(netdev);
netif_tx_disable(netdev);
netif_carrier_off(repr->netdev);
netif_tx_disable(repr->netdev);

return nfp_flower_cmsg_portmod(netdev, false);
return nfp_flower_cmsg_portmod(repr, false);
}

static const struct net_device_ops nfp_flower_repr_netdev_ops = {
.ndo_open = nfp_flower_repr_netdev_open,
.ndo_stop = nfp_flower_repr_netdev_stop,
.ndo_start_xmit = nfp_repr_xmit,
.ndo_get_stats64 = nfp_flower_repr_netdev_get_stats64,
.ndo_has_offload_stats = nfp_repr_has_offload_stats,
.ndo_get_offload_stats = nfp_repr_get_offload_stats,
};

static void nfp_flower_sriov_disable(struct nfp_app *app)
{
nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
@@ -162,14 +141,19 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
struct nfp_flower_priv *priv = app->priv;
struct nfp_reprs *reprs, *old_reprs;
enum nfp_port_type port_type;
const u8 queue = 0;
int i, err;

port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT :
NFP_PORT_VF_PORT;

reprs = nfp_reprs_alloc(cnt);
if (!reprs)
return -ENOMEM;

for (i = 0; i < cnt; i++) {
struct nfp_port *port;
u32 port_id;

reprs->reprs[i] = nfp_repr_alloc(app);
@@ -178,15 +162,24 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
goto err_reprs_clean;
}

port = nfp_port_alloc(app, port_type, reprs->reprs[i]);
if (repr_type == NFP_REPR_TYPE_PF) {
port->pf_id = i;
} else {
port->pf_id = 0; /* For now we only support 1 PF */
port->vf_id = i;
}

eth_hw_addr_random(reprs->reprs[i]);

port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type,
i, queue);
err = nfp_repr_init(app, reprs->reprs[i],
&nfp_flower_repr_netdev_ops,
port_id, NULL, priv->nn->dp.netdev);
if (err)
port_id, port, priv->nn->dp.netdev);
if (err) {
nfp_port_free(port);
goto err_reprs_clean;
}

nfp_info(app->cpp, "%s%d Representor(%s) created\n",
repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i,
@@ -260,7 +253,6 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)

cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
err = nfp_repr_init(app, reprs->reprs[phys_port],
&nfp_flower_repr_netdev_ops,
cmsg_port_id, port, priv->nn->dp.netdev);
if (err) {
nfp_port_free(port);
@@ -296,26 +288,16 @@ static int nfp_flower_start(struct nfp_app *app)
NFP_REPR_TYPE_PF, 1);
}

static void nfp_flower_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
{
kfree(app->priv);
app->priv = NULL;
}

static int nfp_flower_vnic_init(struct nfp_app *app, struct nfp_net *nn,
unsigned int id)
{
struct nfp_flower_priv *priv;
struct nfp_flower_priv *priv = app->priv;

if (id > 0) {
nfp_warn(app->cpp, "FlowerNIC doesn't support more than one data vNIC\n");
goto err_invalid_port;
}

priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
app->priv = priv;
priv->nn = nn;

eth_hw_addr_random(nn->dp.netdev);
@@ -347,9 +329,19 @@ static int nfp_flower_init(struct nfp_app *app)
return -EINVAL;
}

app->priv = kzalloc(sizeof(struct nfp_flower_priv), GFP_KERNEL);
if (!app->priv)
return -ENOMEM;

return 0;
}

static void nfp_flower_clean(struct nfp_app *app)
{
kfree(app->priv);
app->priv = NULL;
}

const struct nfp_app_type app_flower = {
.id = NFP_APP_FLOWER_NIC,
.name = "flower",
@@ -358,9 +350,12 @@ const struct nfp_app_type app_flower = {
.extra_cap = nfp_flower_extra_cap,

.init = nfp_flower_init,
.clean = nfp_flower_clean,

.vnic_init = nfp_flower_vnic_init,
.vnic_clean = nfp_flower_vnic_clean,

.repr_open = nfp_flower_repr_netdev_open,
.repr_stop = nfp_flower_repr_netdev_stop,

.start = nfp_flower_start,
.stop = nfp_flower_stop,
@@ -43,7 +43,9 @@
static const struct nfp_app_type *apps[] = {
&app_nic,
&app_bpf,
#ifdef CONFIG_NFP_APP_FLOWER
&app_flower,
#endif
};

const char *nfp_app_mip_name(struct nfp_app *app)
@@ -47,6 +47,7 @@ struct sk_buff;
struct nfp_app;
struct nfp_cpp;
struct nfp_pf;
struct nfp_repr;
struct nfp_net;

enum nfp_app_id {
@@ -66,10 +67,13 @@ extern const struct nfp_app_type app_flower;
* @ctrl_has_meta: control messages have prepend of type:5/port:CTRL
*
* Callbacks
* @init: perform basic app checks
* @init: perform basic app checks and init
* @clean: clean app state
* @extra_cap: extra capabilities string
* @vnic_init: init vNICs (assign port types, etc.)
* @vnic_clean: clean up app's vNIC state
* @repr_open: representor netdev open callback
* @repr_stop: representor netdev stop callback
* @start: start application logic
* @stop: stop application logic
* @ctrl_msg_rx: control message handler
@@ -88,6 +92,7 @@ struct nfp_app_type {
bool ctrl_has_meta;

int (*init)(struct nfp_app *app);
void (*clean)(struct nfp_app *app);

const char *(*extra_cap)(struct nfp_app *app, struct nfp_net *nn);

@@ -95,6 +100,9 @@ struct nfp_app_type {
unsigned int id);
void (*vnic_clean)(struct nfp_app *app, struct nfp_net *nn);

int (*repr_open)(struct nfp_app *app, struct nfp_repr *repr);
int (*repr_stop)(struct nfp_app *app, struct nfp_repr *repr);

int (*start)(struct nfp_app *app);
void (*stop)(struct nfp_app *app);

@@ -144,6 +152,12 @@ static inline int nfp_app_init(struct nfp_app *app)
return app->type->init(app);
}

static inline void nfp_app_clean(struct nfp_app *app)
{
if (app->type->clean)
app->type->clean(app);
}

static inline int nfp_app_vnic_init(struct nfp_app *app, struct nfp_net *nn,
unsigned int id)
{
@@ -156,6 +170,20 @@ static inline void nfp_app_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
app->type->vnic_clean(app, nn);
}

static inline int nfp_app_repr_open(struct nfp_app *app, struct nfp_repr *repr)
{
if (!app->type->repr_open)
return -EINVAL;
return app->type->repr_open(app, repr);
}

static inline int nfp_app_repr_stop(struct nfp_app *app, struct nfp_repr *repr)
{
if (!app->type->repr_stop)
return -EINVAL;
return app->type->repr_stop(app, repr);
}

static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
{
app->ctrl = ctrl;
@@ -107,17 +107,18 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
goto err_unlock;
}

err = nfp_app_sriov_enable(pf->app, num_vfs);
err = pci_enable_sriov(pdev, num_vfs);
if (err) {
dev_warn(&pdev->dev, "App specific PCI sriov configuration failed: %d\n",
err);
dev_warn(&pdev->dev, "Failed to enable PCI SR-IOV: %d\n", err);
goto err_unlock;
}

err = pci_enable_sriov(pdev, num_vfs);
err = nfp_app_sriov_enable(pf->app, num_vfs);
if (err) {
dev_warn(&pdev->dev, "Failed to enable PCI sriov: %d\n", err);
goto err_app_sriov_disable;
dev_warn(&pdev->dev,
"App specific PCI SR-IOV configuration failed: %d\n",
err);
goto err_sriov_disable;
}

pf->num_vfs = num_vfs;
@@ -127,8 +128,8 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
mutex_unlock(&pf->lock);
return num_vfs;

err_app_sriov_disable:
nfp_app_sriov_disable(pf->app);
err_sriov_disable:
pci_disable_sriov(pdev);
err_unlock:
mutex_unlock(&pf->lock);
return err;
@@ -136,17 +137,20 @@ err_unlock:
return 0;
}

static int __nfp_pcie_sriov_disable(struct pci_dev *pdev)
static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
{
#ifdef CONFIG_PCI_IOV
struct nfp_pf *pf = pci_get_drvdata(pdev);

mutex_lock(&pf->lock);

/* If the VFs are assigned we cannot shut down SR-IOV without
* causing issues, so just leave the hardware available but
* disabled
*/
if (pci_vfs_assigned(pdev)) {
dev_warn(&pdev->dev, "Disabling while VFs assigned - VFs will not be deallocated\n");
mutex_unlock(&pf->lock);
return -EPERM;
}

@@ -156,22 +160,12 @@ static int __nfp_pcie_sriov_disable(struct pci_dev *pdev)

pci_disable_sriov(pdev);
dev_dbg(&pdev->dev, "Removed VFs.\n");

mutex_unlock(&pf->lock);
#endif
return 0;
}

static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
{
struct nfp_pf *pf = pci_get_drvdata(pdev);
int err;

mutex_lock(&pf->lock);
err = __nfp_pcie_sriov_disable(pdev);
mutex_unlock(&pf->lock);

return err;
}

static int nfp_pcie_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
if (num_vfs == 0)
@@ -382,6 +376,12 @@ static int nfp_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, pf);
pf->pdev = pdev;

pf->wq = alloc_workqueue("nfp-%s", 0, 2, pci_name(pdev));
if (!pf->wq) {
err = -ENOMEM;
goto err_pci_priv_unset;
}

pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev);
if (IS_ERR_OR_NULL(pf->cpp)) {
err = PTR_ERR(pf->cpp);
@@ -414,6 +414,14 @@ static int nfp_pci_probe(struct pci_dev *pdev,
if (err)
goto err_fw_unload;

pf->num_vfs = pci_num_vf(pdev);
if (pf->num_vfs > pf->limit_vfs) {
dev_err(&pdev->dev,
"Error: %d VFs already enabled, but loaded FW can only support %d\n",
pf->num_vfs, pf->limit_vfs);
goto err_fw_unload;
}

err = nfp_net_pci_probe(pf);
if (err)
goto err_sriov_unlimit;
@@ -443,6 +451,8 @@ err_hwinfo_free:
kfree(pf->hwinfo);
nfp_cpp_free(pf->cpp);
err_disable_msix:
destroy_workqueue(pf->wq);
err_pci_priv_unset:
pci_set_drvdata(pdev, NULL);
mutex_destroy(&pf->lock);
devlink_free(devlink);
@@ -463,11 +473,11 @@ static void nfp_pci_remove(struct pci_dev *pdev)

devlink = priv_to_devlink(pf);

nfp_net_pci_remove(pf);

nfp_pcie_sriov_disable(pdev);
pci_sriov_set_totalvfs(pf->pdev, 0);

nfp_net_pci_remove(pf);

devlink_unregister(devlink);

kfree(pf->rtbl);
@@ -475,6 +485,7 @@ static void nfp_pci_remove(struct pci_dev *pdev)
if (pf->fw_loaded)
nfp_fw_unload(pf);

destroy_workqueue(pf->wq);
pci_set_drvdata(pdev, NULL);
kfree(pf->hwinfo);
nfp_cpp_free(pf->cpp);
@@ -89,6 +89,7 @@ struct nfp_rtsym_table;
* @num_vnics: Number of vNICs spawned
* @vnics: Linked list of vNIC structures (struct nfp_net)
* @ports: Linked list of port structures (struct nfp_port)
* @wq: Workqueue for running works which need to grab @lock
* @port_refresh_work: Work entry for taking netdevs out
* @lock: Protects all fields which may change after probe
*/
@@ -131,7 +132,10 @@ struct nfp_pf {

struct list_head vnics;
struct list_head ports;

struct workqueue_struct *wq;
struct work_struct port_refresh_work;

struct mutex lock;
};
@@ -80,58 +80,6 @@ static int nfp_is_ready(struct nfp_pf *pf)
return state == 15;
}

/**
* nfp_net_map_area() - Help function to map an area
* @cpp: NFP CPP handler
* @name: Name for the area
* @target: CPP target
* @addr: CPP address
* @size: Size of the area
* @area: Area handle (returned).
*
* This function is primarily to simplify the code in the main probe
* function. To undo the effect of this functions call
* @nfp_cpp_area_release_free(*area);
*
* Return: Pointer to memory mapped area or ERR_PTR
*/
static u8 __iomem *nfp_net_map_area(struct nfp_cpp *cpp,
const char *name, int isl, int target,
unsigned long long addr, unsigned long size,
struct nfp_cpp_area **area)
{
u8 __iomem *res;
u32 dest;
int err;

dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, isl);

*area = nfp_cpp_area_alloc_with_name(cpp, dest, name, addr, size);
if (!*area) {
err = -EIO;
goto err_area;
}

err = nfp_cpp_area_acquire(*area);
if (err < 0)
goto err_acquire;

res = nfp_cpp_area_iomem(*area);
if (!res) {
err = -EIO;
goto err_map;
}

return res;

err_map:
nfp_cpp_area_release(*area);
err_acquire:
nfp_cpp_area_free(*area);
err_area:
return (u8 __iomem *)ERR_PTR(err);
}

/**
* nfp_net_get_mac_addr() - Get the MAC address.
* @pf: NFP PF handle
@@ -226,31 +174,12 @@ static u8 __iomem *
nfp_net_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt,
unsigned int min_size, struct nfp_cpp_area **area)
{
const struct nfp_rtsym *sym;
char pf_symbol[256];
u8 __iomem *mem;

snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt,
nfp_cppcore_pcie_unit(pf->cpp));

sym = nfp_rtsym_lookup(pf->rtbl, pf_symbol);
if (!sym)
return (u8 __iomem *)ERR_PTR(-ENOENT);

if (sym->size < min_size) {
nfp_err(pf->cpp, "PF symbol %s too small\n", pf_symbol);
return (u8 __iomem *)ERR_PTR(-EINVAL);
}

mem = nfp_net_map_area(pf->cpp, name, sym->domain, sym->target,
sym->addr, sym->size, area);
if (IS_ERR(mem)) {
nfp_err(pf->cpp, "Failed to map PF symbol %s: %ld\n",
pf_symbol, PTR_ERR(mem));
return mem;
}

return mem;
return nfp_rtsym_map(pf->rtbl, pf_symbol, name, min_size, area);
}

static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn)
@@ -485,7 +414,7 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
if (IS_ERR(ctrl_bar)) {
nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n");
err = PTR_ERR(ctrl_bar);
goto err_free;
goto err_app_clean;
}

pf->ctrl_vnic = nfp_net_pf_alloc_vnic(pf, false, ctrl_bar, qc_bar,
@@ -499,8 +428,11 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)

err_unmap:
nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
err_app_clean:
nfp_app_clean(pf->app);
err_free:
nfp_app_free(pf->app);
pf->app = NULL;
return err;
}

@@ -510,6 +442,7 @@ static void nfp_net_pf_app_clean(struct nfp_pf *pf)
nfp_net_pf_free_vnic(pf, pf->ctrl_vnic);
nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
}
nfp_app_clean(pf->app);
nfp_app_free(pf->app);
pf->app = NULL;
}
@@ -555,8 +488,16 @@ static int nfp_net_pf_app_start(struct nfp_pf *pf)
if (err)
goto err_ctrl_stop;

if (pf->num_vfs) {
err = nfp_app_sriov_enable(pf->app, pf->num_vfs);
if (err)
goto err_app_stop;
}

return 0;

err_app_stop:
nfp_app_stop(pf->app);
err_ctrl_stop:
nfp_net_pf_app_stop_ctrl(pf);
return err;
@@ -564,6 +505,8 @@ err_ctrl_stop:

static void nfp_net_pf_app_stop(struct nfp_pf *pf)
{
if (pf->num_vfs)
nfp_app_sriov_disable(pf->app);
nfp_app_stop(pf->app);
nfp_net_pf_app_stop_ctrl(pf);
}
@@ -580,26 +523,22 @@ static void nfp_net_pci_unmap_mem(struct nfp_pf *pf)

static int nfp_net_pci_map_mem(struct nfp_pf *pf)
{
u32 ctrl_bar_sz;
u8 __iomem *mem;
u32 min_size;
int err;

ctrl_bar_sz = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE;
min_size = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE;
mem = nfp_net_pf_map_rtsym(pf, "net.ctrl", "_pf%d_net_bar0",
ctrl_bar_sz, &pf->data_vnic_bar);
min_size, &pf->data_vnic_bar);
if (IS_ERR(mem)) {
nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n");
err = PTR_ERR(mem);
if (!pf->fw_loaded && err == -ENOENT)
err = -EPROBE_DEFER;
return err;
return PTR_ERR(mem);
}

pf->mac_stats_mem = nfp_net_pf_map_rtsym(pf, "net.macstats",
"_mac_stats",
NFP_MAC_STATS_SIZE *
(pf->eth_tbl->max_index + 1),
&pf->mac_stats_bar);
min_size = NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1);
pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats",
"net.macstats", min_size,
&pf->mac_stats_bar);
if (IS_ERR(pf->mac_stats_mem)) {
if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) {
err = PTR_ERR(pf->mac_stats_mem);
@@ -620,7 +559,7 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf)
pf->vf_cfg_mem = NULL;
}

mem = nfp_net_map_area(pf->cpp, "net.qc", 0, 0,
mem = nfp_cpp_map_area(pf->cpp, "net.qc", 0, 0,
NFP_PCIE_QUEUE(0), NFP_QCP_QUEUE_AREA_SZ,
&pf->qc_area);
if (IS_ERR(mem)) {
@@ -743,7 +682,7 @@ void nfp_net_refresh_port_table(struct nfp_port *port)

set_bit(NFP_PORT_CHANGED, &port->flags);

schedule_work(&pf->port_refresh_work);
queue_work(pf->wq, &pf->port_refresh_work);
}

int nfp_net_refresh_eth_port(struct nfp_port *port)
@@ -786,6 +725,12 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
return -EINVAL;
}

if (!pf->rtbl) {
nfp_err(pf->cpp, "No %s, giving up.\n",
pf->fw_loaded ? "symbol table" : "firmware found");
return -EPROBE_DEFER;
}

mutex_lock(&pf->lock);
pf->max_data_vnics = nfp_net_pf_get_num_ports(pf);
if ((int)pf->max_data_vnics < 0) {
@@ -37,6 +37,7 @@
#include <net/dst_metadata.h>

#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nsp.h"
#include "nfp_app.h"
#include "nfp_main.h"
#include "nfp_net_ctrl.h"
@@ -135,25 +136,34 @@ nfp_repr_pf_get_stats64(const struct nfp_app *app, u8 pf,
stats->rx_dropped = readq(mem + NFP_NET_CFG_STATS_TX_DISCARDS);
}

void
nfp_repr_get_stats64(const struct nfp_app *app, enum nfp_repr_type type,
u8 port, struct rtnl_link_stats64 *stats)
static void
nfp_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
switch (type) {
case NFP_REPR_TYPE_PHYS_PORT:
nfp_repr_phy_port_get_stats64(app, port, stats);
struct nfp_repr *repr = netdev_priv(netdev);
struct nfp_eth_table_port *eth_port;
struct nfp_app *app = repr->app;

if (WARN_ON(!repr->port))
return;

switch (repr->port->type) {
case NFP_PORT_PHYS_PORT:
eth_port = __nfp_port_get_eth_port(repr->port);
if (!eth_port)
break;
nfp_repr_phy_port_get_stats64(app, eth_port->index, stats);
break;
case NFP_REPR_TYPE_PF:
nfp_repr_pf_get_stats64(app, port, stats);
case NFP_PORT_PF_PORT:
nfp_repr_pf_get_stats64(app, repr->port->pf_id, stats);
break;
case NFP_REPR_TYPE_VF:
nfp_repr_vf_get_stats64(app, port, stats);
case NFP_PORT_VF_PORT:
nfp_repr_vf_get_stats64(app, repr->port->vf_id, stats);
default:
break;
}
}

bool
static bool
nfp_repr_has_offload_stats(const struct net_device *dev, int attr_id)
{
switch (attr_id) {
@@ -196,8 +206,9 @@ nfp_repr_get_host_stats64(const struct net_device *netdev,
return 0;
}

int nfp_repr_get_offload_stats(int attr_id, const struct net_device *dev,
void *stats)
static int
nfp_repr_get_offload_stats(int attr_id, const struct net_device *dev,
void *stats)
{
switch (attr_id) {
case IFLA_OFFLOAD_XSTATS_CPU_HIT:
@@ -207,7 +218,7 @@ int nfp_repr_get_offload_stats(int attr_id, const struct net_device *dev,
return -EINVAL;
}

netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct nfp_repr *repr = netdev_priv(netdev);
unsigned int len = skb->len;
@@ -224,6 +235,30 @@ netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
return ret;
}

static int nfp_repr_stop(struct net_device *netdev)
{
struct nfp_repr *repr = netdev_priv(netdev);

return nfp_app_repr_stop(repr->app, repr);
}

static int nfp_repr_open(struct net_device *netdev)
{
struct nfp_repr *repr = netdev_priv(netdev);

return nfp_app_repr_open(repr->app, repr);
}

const struct net_device_ops nfp_repr_netdev_ops = {
.ndo_open = nfp_repr_open,
.ndo_stop = nfp_repr_stop,
.ndo_start_xmit = nfp_repr_xmit,
.ndo_get_stats64 = nfp_repr_get_stats64,
.ndo_has_offload_stats = nfp_repr_has_offload_stats,
.ndo_get_offload_stats = nfp_repr_get_offload_stats,
.ndo_get_phys_port_name = nfp_port_get_phys_port_name,
};

static void nfp_repr_clean(struct nfp_repr *repr)
{
unregister_netdev(repr->netdev);
@@ -248,8 +283,8 @@ static void nfp_repr_set_lockdep_class(struct net_device *dev)
}

int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
const struct net_device_ops *netdev_ops, u32 cmsg_port_id,
struct nfp_port *port, struct net_device *pf_netdev)
u32 cmsg_port_id, struct nfp_port *port,
struct net_device *pf_netdev)
{
struct nfp_repr *repr = netdev_priv(netdev);
int err;
@@ -263,7 +298,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
repr->dst->u.port_info.port_id = cmsg_port_id;
repr->dst->u.port_info.lower_dev = pf_netdev;

netdev->netdev_ops = netdev_ops;
netdev->netdev_ops = &nfp_repr_netdev_ops;

err = register_netdev(netdev);
if (err)
@@ -97,16 +97,15 @@ enum nfp_repr_type {
};
#define NFP_REPR_TYPE_MAX (__NFP_REPR_TYPE_MAX - 1)

extern const struct net_device_ops nfp_repr_netdev_ops;

static inline bool nfp_netdev_is_nfp_repr(struct net_device *netdev)
{
return netdev->netdev_ops == &nfp_repr_netdev_ops;
}

void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len);
void
nfp_repr_get_stats64(const struct nfp_app *app, enum nfp_repr_type type,
u8 port, struct rtnl_link_stats64 *stats);
bool nfp_repr_has_offload_stats(const struct net_device *dev, int attr_id);
int nfp_repr_get_offload_stats(int attr_id, const struct net_device *dev,
void *stats);
netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev);
int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
const struct net_device_ops *netdev_ops,
u32 cmsg_port_id, struct nfp_port *port,
struct net_device *pf_netdev);
struct net_device *nfp_repr_alloc(struct nfp_app *app);
@@ -42,13 +42,21 @@

struct nfp_port *nfp_port_from_netdev(struct net_device *netdev)
{
struct nfp_net *nn;
if (nfp_netdev_is_nfp_net(netdev)) {
struct nfp_net *nn = netdev_priv(netdev);

if (WARN_ON(!nfp_netdev_is_nfp_net(netdev)))
return NULL;
nn = netdev_priv(netdev);
return nn->port;
}

return nn->port;
if (nfp_netdev_is_nfp_repr(netdev)) {
struct nfp_repr *repr = netdev_priv(netdev);

return repr->port;
}

WARN(1, "Unknown netdev type for nfp_port\n");

return NULL;
}

struct nfp_port *
@@ -98,15 +106,31 @@ nfp_port_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
int n;

port = nfp_port_from_netdev(netdev);
eth_port = __nfp_port_get_eth_port(port);
if (!eth_port)
if (!port)
return -EOPNOTSUPP;

if (!eth_port->is_split)
n = snprintf(name, len, "p%d", eth_port->label_port);
else
n = snprintf(name, len, "p%ds%d", eth_port->label_port,
eth_port->label_subport);
switch (port->type) {
case NFP_PORT_PHYS_PORT:
eth_port = __nfp_port_get_eth_port(port);
if (!eth_port)
return -EOPNOTSUPP;

if (!eth_port->is_split)
n = snprintf(name, len, "p%d", eth_port->label_port);
else
n = snprintf(name, len, "p%ds%d", eth_port->label_port,
eth_port->label_subport);
break;
case NFP_PORT_PF_PORT:
n = snprintf(name, len, "pf%d", port->pf_id);
break;
case NFP_PORT_VF_PORT:
n = snprintf(name, len, "pf%dvf%d", port->pf_id, port->vf_id);
break;
default:
return -EOPNOTSUPP;
}

if (n >= len)
return -EINVAL;
@@ -47,10 +47,14 @@ struct nfp_port;
* state when port disappears because of FW fault or config
* change
* @NFP_PORT_PHYS_PORT: external NIC port
* @NFP_PORT_PF_PORT: logical port of PCI PF
* @NFP_PORT_VF_PORT: logical port of PCI VF
*/
enum nfp_port_type {
NFP_PORT_INVALID,
NFP_PORT_PHYS_PORT,
NFP_PORT_PF_PORT,
NFP_PORT_VF_PORT,
};

/**
@@ -72,6 +76,8 @@ enum nfp_port_flags {
* @dl_port: devlink port structure
* @eth_id: for %NFP_PORT_PHYS_PORT port ID in NFP enumeration scheme
* @eth_port: for %NFP_PORT_PHYS_PORT translated ETH Table port entry
* @pf_id: for %NFP_PORT_PF_PORT, %NFP_PORT_VF_PORT ID of the PCI PF (0-3)
* @vf_id: for %NFP_PORT_VF_PORT ID of the PCI VF within @pf_id
* @port_list: entry on pf's list of ports
*/
struct nfp_port {
@@ -84,8 +90,18 @@ struct nfp_port {

struct devlink_port dl_port;

unsigned int eth_id;
struct nfp_eth_table_port *eth_port;
union {
/* NFP_PORT_PHYS_PORT */
struct {
unsigned int eth_id;
struct nfp_eth_table_port *eth_port;
};
/* NFP_PORT_PF_PORT, NFP_PORT_VF_PORT */
struct {
unsigned int pf_id;
unsigned int vf_id;
};
};

struct list_head port_list;
};
@@ -230,6 +230,9 @@ struct nfp_cpp_area *nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp,
struct nfp_cpp_area *nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address,
unsigned long size);
struct nfp_cpp_area *
nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp, const char *name, u32 cpp_id,
unsigned long long address, unsigned long size);
void nfp_cpp_area_free(struct nfp_cpp_area *area);
int nfp_cpp_area_acquire(struct nfp_cpp_area *area);
int nfp_cpp_area_acquire_nonblocking(struct nfp_cpp_area *area);
@@ -239,8 +242,6 @@ int nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset,
void *buffer, size_t length);
int nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset,
const void *buffer, size_t length);
int nfp_cpp_area_check_range(struct nfp_cpp_area *area,
unsigned long long offset, unsigned long size);
const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area);
void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area);
struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area);
@@ -278,6 +279,10 @@ int nfp_cpp_readq(struct nfp_cpp *cpp, u32 cpp_id,
int nfp_cpp_writeq(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address, u64 value);

u8 __iomem *
nfp_cpp_map_area(struct nfp_cpp *cpp, const char *name, int domain, int target,
u64 addr, unsigned long size, struct nfp_cpp_area **area);

struct nfp_cpp_mutex;

int nfp_cpp_mutex_init(struct nfp_cpp *cpp, int target,
@@ -360,6 +360,41 @@ nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 dest,
return nfp_cpp_area_alloc_with_name(cpp, dest, NULL, address, size);
}

/**
* nfp_cpp_area_alloc_acquire() - allocate a new CPP area and lock it down
* @cpp: CPP handle
* @name: Name of region
* @dest: CPP id
* @address: Start address on CPP target
* @size: Size of area
*
* Allocate and initialize a CPP area structure, and lock it down so
* that it can be accessed directly.
*
* NOTE: @address and @size must be 32-bit aligned values.
*
* NOTE: The area must also be 'released' when the structure is freed.
*
* Return: NFP CPP Area handle, or NULL
*/
struct nfp_cpp_area *
nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp, const char *name, u32 dest,
unsigned long long address, unsigned long size)
{
struct nfp_cpp_area *area;

area = nfp_cpp_area_alloc_with_name(cpp, dest, name, address, size);
if (!area)
return NULL;

if (nfp_cpp_area_acquire(area)) {
nfp_cpp_area_free(area);
return NULL;
}

return area;
}

/**
* nfp_cpp_area_free() - free up the CPP area
* @area: CPP area handle
@@ -535,27 +570,6 @@ int nfp_cpp_area_write(struct nfp_cpp_area *area,
return area->cpp->op->area_write(area, kernel_vaddr, offset, length);
}

/**
* nfp_cpp_area_check_range() - check if address range fits in CPP area
* @area: CPP area handle
* @offset: offset into CPP target
* @length: size of address range in bytes
*
* Check if address range fits within CPP area. Return 0 if area
* fits or -EFAULT on error.
*
* Return: 0, or -ERRNO
*/
int nfp_cpp_area_check_range(struct nfp_cpp_area *area,
unsigned long long offset, unsigned long length)
{
if (offset < area->offset ||
offset + length > area->offset + area->size)
return -EFAULT;

return 0;
}

/**
* nfp_cpp_area_name() - return name of a CPP area
* @cpp_area: CPP area handle
@@ -279,3 +279,43 @@ exit_release:

return err;
}

/**
* nfp_cpp_map_area() - Helper function to map an area
* @cpp: NFP CPP handler
* @name: Name for the area
* @domain: CPP domain
* @target: CPP target
* @addr: CPP address
* @size: Size of the area
* @area: Area handle (output)
*
* Map an area of IOMEM access. To undo the effect of this function call
* @nfp_cpp_area_release_free(*area).
*
* Return: Pointer to memory mapped area or ERR_PTR
*/
u8 __iomem *
nfp_cpp_map_area(struct nfp_cpp *cpp, const char *name, int domain, int target,
u64 addr, unsigned long size, struct nfp_cpp_area **area)
{
u8 __iomem *res;
u32 dest;

dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, domain);

*area = nfp_cpp_area_alloc_acquire(cpp, name, dest, addr, size);
if (!*area)
goto err_eio;

res = nfp_cpp_area_iomem(*area);
if (!res)
goto err_release_free;

return res;

err_release_free:
nfp_cpp_area_release_free(*area);
err_eio:
return (u8 __iomem *)ERR_PTR(-EIO);
}
@@ -97,7 +97,11 @@ int nfp_rtsym_count(struct nfp_rtsym_table *rtbl);
const struct nfp_rtsym *nfp_rtsym_get(struct nfp_rtsym_table *rtbl, int idx);
const struct nfp_rtsym *
nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name);

u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name,
int *error);
u8 __iomem *
nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id,
unsigned int min_size, struct nfp_cpp_area **area);

#endif /* NFP_NFFW_H */
@@ -289,3 +289,30 @@ exit:
return ~0ULL;
return val;
}

u8 __iomem *
nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id,
unsigned int min_size, struct nfp_cpp_area **area)
{
const struct nfp_rtsym *sym;
u8 __iomem *mem;

sym = nfp_rtsym_lookup(rtbl, name);
if (!sym)
return (u8 __iomem *)ERR_PTR(-ENOENT);

if (sym->size < min_size) {
nfp_err(rtbl->cpp, "Symbol %s too small\n", name);
return (u8 __iomem *)ERR_PTR(-EINVAL);
}

mem = nfp_cpp_map_area(rtbl->cpp, id, sym->domain, sym->target,
sym->addr, sym->size, area);
if (IS_ERR(mem)) {
nfp_err(rtbl->cpp, "Failed to map symbol %s: %ld\n",
name, PTR_ERR(mem));
return mem;
}

return mem;
}