Merge branch 'mlx4'
Amir Vadai says:

====================
net/mlx4: Mellanox driver update 08-12-2013

This patchset contains:
1. Support for ndo_get_phys_port_id, added by Hadar.
2. Change the driver to use a CQE/EQE size of 64 bytes by default, done by
   Eyal. This doubles the packet rate of the NIC.
3. Configure the XPS queue mapping on driver load, added by Ido.
4. Fixes for some small bugs, done by Jenny and Matan.

Patchset was applied and tested against commit:
"2372175 lib: hash: follow-up fixups for arch hash"

Changes from V1:
- Removed patch 10, "net/mlx4_en: Fix Supported/Advertised link mode reported
  by ethtool". This patch needs to be rewritten from scratch, and I would not
  like it to block the rest of the patches in this set. Also, the fix to
  Kconfig suggested by Ben will be sent in the next patchset.

Changes from V0:
- Found some issues in the "Reuse memory in RX flow" patch from V0. Removing
  this patch from the patchset until it is analyzed and fixed.
- Fixed some coding style issues in patch 6, "Configure the XPS queue mapping
  on driver load".
- Changed patch 9, "Add NAPI support for transmit side", to use
  NAPI_POLL_WEIGHT instead of MLX4_EN_TX_BUDGET.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 9ddffb8a11
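For readers following the TX NAPI change (patch 9), the conversion uses the
standard kernel NAPI contract: the hard interrupt handler only schedules the
poll routine, and the poll routine reaps at most `budget` completions per
invocation, calling napi_complete() and re-arming the completion queue only
when it finishes under budget. Below is a minimal sketch of that pattern; the
`my_tx_ring` type and `my_*` helpers are illustrative stand-ins, not mlx4
code (the real routines are mlx4_en_process_tx_cq and mlx4_en_arm_cq, visible
in the en_tx.c hunks further down).

#include <linux/kernel.h>
#include <linux/netdevice.h>

/* Illustrative ring type - a stand-in, not the mlx4 structures. */
struct my_tx_ring {
        struct napi_struct napi;
};

/* Stand-in for the driver's completion reaping (mlx4_en_process_tx_cq in
 * this patchset): frees up to 'budget' completed TX descriptors and
 * returns how many were processed. */
static int my_clean_tx(struct my_tx_ring *ring, int budget)
{
        return 0;
}

/* Stand-in for re-arming the completion queue so the next completion
 * raises an interrupt again (mlx4_en_arm_cq in this patchset). */
static void my_arm_cq(struct my_tx_ring *ring)
{
}

/* NAPI poll callback: process at most 'budget' completions per call. */
static int my_poll_tx(struct napi_struct *napi, int budget)
{
        struct my_tx_ring *ring = container_of(napi, struct my_tx_ring, napi);
        int done = my_clean_tx(ring, budget);

        if (done < budget) {
                napi_complete(napi);    /* finished early: leave polling mode */
                my_arm_cq(ring);        /* and re-enable completion interrupts */
                return done;
        }
        return budget;                  /* budget exhausted: stay scheduled */
}

/* Registered once per CQ, e.g. (2013-era four-argument form):
 *      netif_napi_add(dev, &ring->napi, my_poll_tx, NAPI_POLL_WEIGHT);
 * The hard IRQ handler then only calls napi_schedule(&ring->napi). */

With this split, the interrupt handler shrinks to essentially one
napi_schedule() call, which is exactly the shape mlx4_en_tx_irq takes in
this patchset.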
@@ -161,12 +161,16 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
         cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
         cq->mcq.event = mlx4_en_cq_event;
 
-        if (!cq->is_tx) {
+        if (cq->is_tx) {
+                netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
+                               NAPI_POLL_WEIGHT);
+        } else {
                 netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
                 napi_hash_add(&cq->napi);
-                napi_enable(&cq->napi);
         }
 
+        napi_enable(&cq->napi);
+
         return 0;
 }
 
@@ -188,12 +192,12 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
 
 void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 {
+        napi_disable(&cq->napi);
         if (!cq->is_tx) {
-                napi_disable(&cq->napi);
                 napi_hash_del(&cq->napi);
                 synchronize_rcu();
-                netif_napi_del(&cq->napi);
         }
+        netif_napi_del(&cq->napi);
 
         mlx4_cq_free(priv->mdev->dev, &cq->mcq);
 }
@@ -174,6 +174,9 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
                 mlx4_err(mdev, "Internal error detected, restarting device\n");
                 break;
 
+        case MLX4_DEV_EVENT_SLAVE_INIT:
+        case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
+                break;
         default:
                 if (port < 1 || port > dev->caps.num_ports ||
                     !mdev->pndev[port])
@@ -1910,8 +1910,10 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
                               prof->tx_ring_size, i, TX, node))
                         goto err;
 
-                if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
-                                           prof->tx_ring_size, TXBB_SIZE, node))
+                if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
+                                           priv->base_tx_qpn + i,
+                                           prof->tx_ring_size, TXBB_SIZE,
+                                           node, i))
                         goto err;
         }
 
@@ -2164,6 +2166,27 @@ static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_st
         return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
 }
 
+#define PORT_ID_BYTE_LEN 8
+static int mlx4_en_get_phys_port_id(struct net_device *dev,
+                                    struct netdev_phys_port_id *ppid)
+{
+        struct mlx4_en_priv *priv = netdev_priv(dev);
+        struct mlx4_dev *mdev = priv->mdev->dev;
+        int i;
+        u64 phys_port_id = mdev->caps.phys_port_id[priv->port];
+
+        if (!phys_port_id)
+                return -EOPNOTSUPP;
+
+        ppid->id_len = sizeof(phys_port_id);
+        for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
+                ppid->id[i] = phys_port_id & 0xff;
+                phys_port_id >>= 8;
+        }
+        return 0;
+}
+
 static const struct net_device_ops mlx4_netdev_ops = {
         .ndo_open               = mlx4_en_open,
         .ndo_stop               = mlx4_en_close,
@@ -2189,6 +2212,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
 #ifdef CONFIG_NET_RX_BUSY_POLL
         .ndo_busy_poll          = mlx4_en_low_latency_recv,
 #endif
+        .ndo_get_phys_port_id   = mlx4_en_get_phys_port_id,
 };
 
 static const struct net_device_ops mlx4_netdev_ops_master = {
@@ -2217,6 +2241,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
 #ifdef CONFIG_RFS_ACCEL
         .ndo_rx_flow_steer      = mlx4_en_filter_rfs,
 #endif
+        .ndo_get_phys_port_id   = mlx4_en_get_phys_port_id,
 };
 
 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -55,7 +55,7 @@ MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
 
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
                            struct mlx4_en_tx_ring **pring, int qpn, u32 size,
-                           u16 stride, int node)
+                           u16 stride, int node, int queue_index)
 {
         struct mlx4_en_dev *mdev = priv->mdev;
         struct mlx4_en_tx_ring *ring;
@@ -140,6 +140,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
                 ring->bf_enabled = true;
 
         ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
+        ring->queue_index = queue_index;
+
+        if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index))
+                cpumask_set_cpu(queue_index, &ring->affinity_mask);
 
         *pring = ring;
         return 0;
@@ -206,6 +210,9 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 
         err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
                                &ring->qp, &ring->qp_state);
+        if (!user_prio && cpu_online(ring->queue_index))
+                netif_set_xps_queue(priv->dev, &ring->affinity_mask,
+                                    ring->queue_index);
 
         return err;
 }
@@ -317,7 +324,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
                         }
                 }
         }
-        dev_kfree_skb_any(skb);
+        dev_kfree_skb(skb);
         return tx_info->nr_txbb;
 }
 
@@ -354,7 +361,9 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
         return cnt;
 }
 
-static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+static int mlx4_en_process_tx_cq(struct net_device *dev,
+                                 struct mlx4_en_cq *cq,
+                                 int budget)
 {
         struct mlx4_en_priv *priv = netdev_priv(dev);
         struct mlx4_cq *mcq = &cq->mcq;
@@ -372,9 +381,10 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
         u32 bytes = 0;
         int factor = priv->cqe_factor;
         u64 timestamp = 0;
+        int done = 0;
 
         if (!priv->port_up)
-                return;
+                return 0;
 
         index = cons_index & size_mask;
         cqe = &buf[(index << factor) + factor];
@@ -383,7 +393,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
 
         /* Process all completed CQEs */
         while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
-                        cons_index & size)) {
+                        cons_index & size) && (done < budget)) {
                 /*
                  * make sure we read the CQE after we read the
                  * ownership bit
@@ -421,7 +431,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
                         txbbs_stamp = txbbs_skipped;
                         packets++;
                         bytes += ring->tx_info[ring_index].nr_bytes;
-                } while (ring_index != new_index);
+                } while ((++done < budget) && (ring_index != new_index));
 
                 ++cons_index;
                 index = cons_index & size_mask;
@@ -447,6 +457,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
                 netif_tx_wake_queue(ring->tx_queue);
                 priv->port_stats.wake_queue++;
         }
+        return done;
 }
 
 void mlx4_en_tx_irq(struct mlx4_cq *mcq)
@@ -454,10 +465,31 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq)
         struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
         struct mlx4_en_priv *priv = netdev_priv(cq->dev);
 
-        mlx4_en_process_tx_cq(cq->dev, cq);
-        mlx4_en_arm_cq(priv, cq);
+        if (priv->port_up)
+                napi_schedule(&cq->napi);
+        else
+                mlx4_en_arm_cq(priv, cq);
+}
+
+/* TX CQ polling - called by NAPI */
+int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
+{
+        struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
+        struct net_device *dev = cq->dev;
+        struct mlx4_en_priv *priv = netdev_priv(dev);
+        int done;
+
+        done = mlx4_en_process_tx_cq(dev, cq, budget);
+
+        /* If we used up all the quota - we're probably not done yet... */
+        if (done < budget) {
+                /* Done for now */
+                napi_complete(napi);
+                mlx4_en_arm_cq(priv, cq);
+                return done;
+        }
+        return budget;
 }
 
 static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
                                                       struct mlx4_en_tx_ring *ring,
@@ -207,25 +207,25 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 
 /* when opcode modifier = 1 */
 #define QUERY_FUNC_CAP_PHYS_PORT_OFFSET         0x3
-#define QUERY_FUNC_CAP_RDMA_PROPS_OFFSET        0x8
-#define QUERY_FUNC_CAP_ETH_PROPS_OFFSET         0xc
+#define QUERY_FUNC_CAP_FLAGS0_OFFSET            0x8
+#define QUERY_FUNC_CAP_FLAGS1_OFFSET            0xc
 
 #define QUERY_FUNC_CAP_QP0_TUNNEL               0x10
 #define QUERY_FUNC_CAP_QP0_PROXY                0x14
 #define QUERY_FUNC_CAP_QP1_TUNNEL               0x18
 #define QUERY_FUNC_CAP_QP1_PROXY                0x1c
+#define QUERY_FUNC_CAP_PHYS_PORT_ID             0x28
 
-#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC      0x40
-#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN     0x80
+#define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC         0x40
+#define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN        0x80
+#define QUERY_FUNC_CAP_FLAGS1_NIC_INFO          0x10
 
-#define QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID     0x80
+#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
 
         if (vhcr->op_modifier == 1) {
-                field = 0;
-                /* ensure force vlan and force mac bits are not set */
-                MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
-                /* ensure that phy_wqe_gid bit is not set */
-                MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
+                /* Set nic_info bit to mark new fields support */
+                field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;
+                MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);
 
                 field = vhcr->in_modifier; /* phys-port = logical-port */
                 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
@@ -243,6 +243,9 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
                 size += 2;
                 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY);
 
+                MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
+                         QUERY_FUNC_CAP_PHYS_PORT_ID);
+
         } else if (vhcr->op_modifier == 0) {
                 /* enable rdma and ethernet interfaces, and new quota locations */
                 field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
@@ -391,22 +394,22 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
                 goto out;
         }
 
+        MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
         if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
-                MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
-                if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN) {
+                if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_OFFSET) {
                         mlx4_err(dev, "VLAN is enforced on this port\n");
                         err = -EPROTONOSUPPORT;
                         goto out;
                 }
 
-                if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC) {
+                if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) {
                         mlx4_err(dev, "Force mac is enabled on this port\n");
                         err = -EPROTONOSUPPORT;
                         goto out;
                 }
         } else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
-                MLX4_GET(field, outbox, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
-                if (field & QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID) {
+                MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
+                if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
                         mlx4_err(dev, "phy_wqe_gid is "
                                  "enforced on this ib port\n");
                         err = -EPROTONOSUPPORT;
@@ -433,6 +436,10 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
         MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
         func_cap->qp1_proxy_qpn = size & 0xFFFFFF;
 
+        if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO)
+                MLX4_GET(func_cap->phys_port_id, outbox,
+                         QUERY_FUNC_CAP_PHYS_PORT_ID);
+
         /* All other resources are allocated by the master, but we still report
          * 'num' and 'reserved' capabilities as follows:
          * - num remains the maximum resource index
@@ -1713,6 +1720,43 @@ int mlx4_NOP(struct mlx4_dev *dev)
         return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
 }
 
+int mlx4_get_phys_port_id(struct mlx4_dev *dev)
+{
+        u8 port;
+        u32 *outbox;
+        struct mlx4_cmd_mailbox *mailbox;
+        u32 in_mod;
+        u32 guid_hi, guid_lo;
+        int err, ret = 0;
+#define MOD_STAT_CFG_PORT_OFFSET 8
+#define MOD_STAT_CFG_GUID_H      0X14
+#define MOD_STAT_CFG_GUID_L      0X1c
+
+        mailbox = mlx4_alloc_cmd_mailbox(dev);
+        if (IS_ERR(mailbox))
+                return PTR_ERR(mailbox);
+        outbox = mailbox->buf;
+
+        for (port = 1; port <= dev->caps.num_ports; port++) {
+                in_mod = port << MOD_STAT_CFG_PORT_OFFSET;
+                err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2,
+                                   MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
+                                   MLX4_CMD_NATIVE);
+                if (err) {
+                        mlx4_err(dev, "Fail to get port %d uplink guid\n",
+                                 port);
+                        ret = err;
+                } else {
+                        MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H);
+                        MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L);
+                        dev->caps.phys_port_id[port] = (u64)guid_lo |
+                                                       (u64)guid_hi << 32;
+                }
+        }
+        mlx4_free_cmd_mailbox(dev, mailbox);
+        return ret;
+}
+
 #define MLX4_WOL_SETUP_MODE (5 << 28)
 int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
 {
@@ -140,6 +140,8 @@ struct mlx4_func_cap {
         u32     qp1_proxy_qpn;
         u8      physical_port;
         u8      port_flags;
+        u8      flags1;
+        u64     phys_port_id;
 };
 
 struct mlx4_adapter {
@@ -96,10 +96,10 @@ MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
                                          " To activate device managed"
                                          " flow steering when available, set to -1");
 
-static bool enable_64b_cqe_eqe;
+static bool enable_64b_cqe_eqe = true;
 module_param(enable_64b_cqe_eqe, bool, 0444);
 MODULE_PARM_DESC(enable_64b_cqe_eqe,
-                 "Enable 64 byte CQEs/EQEs when the FW supports this");
+                 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
 
 #define HCA_GLOBAL_CAP_MASK             0
 
@@ -606,6 +606,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
                 dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
                 dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
                 dev->caps.port_mask[i] = dev->caps.port_type[i];
+                dev->caps.phys_port_id[i] = func_cap.phys_port_id;
                 if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
                                                     &dev->caps.gid_table_len[i],
                                                     &dev->caps.pkey_table_len[i]))
@@ -1484,6 +1485,10 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 
                 choose_steering_mode(dev, &dev_cap);
 
+                err = mlx4_get_phys_port_id(dev);
+                if (err)
+                        mlx4_err(dev, "Fail to get physical port id\n");
+
                 if (mlx4_is_master(dev))
                         mlx4_parav_master_pf_caps(dev);
 
@@ -125,9 +125,14 @@ static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port,
                                               enum mlx4_steer_type steer,
                                               u32 qpn)
 {
-        struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[port - 1];
+        struct mlx4_steer *s_steer;
         struct mlx4_promisc_qp *pqp;
 
+        if (port < 1 || port > dev->caps.num_ports)
+                return NULL;
+
+        s_steer = &mlx4_priv(dev)->steer[port - 1];
+
         list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
                 if (pqp->qpn == qpn)
                         return pqp;
@@ -154,6 +159,9 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 port,
         u32 prot;
         int err;
 
+        if (port < 1 || port > dev->caps.num_ports)
+                return -EINVAL;
+
         s_steer = &mlx4_priv(dev)->steer[port - 1];
         new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
         if (!new_entry)
@@ -238,6 +246,9 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
         struct mlx4_promisc_qp *pqp;
         struct mlx4_promisc_qp *dqp;
 
+        if (port < 1 || port > dev->caps.num_ports)
+                return -EINVAL;
+
         s_steer = &mlx4_priv(dev)->steer[port - 1];
 
         pqp = get_promisc_qp(dev, port, steer, qpn);
@@ -283,6 +294,9 @@ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
         struct mlx4_steer_index *tmp_entry, *entry = NULL;
         struct mlx4_promisc_qp *dqp, *tmp_dqp;
 
+        if (port < 1 || port > dev->caps.num_ports)
+                return NULL;
+
         s_steer = &mlx4_priv(dev)->steer[port - 1];
 
         /* if qp is not promisc, it cannot be duplicated */
@@ -324,6 +338,9 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
         bool ret = false;
         int i;
 
+        if (port < 1 || port > dev->caps.num_ports)
+                return NULL;
+
         s_steer = &mlx4_priv(dev)->steer[port - 1];
 
         mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -378,6 +395,9 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
         int err;
         struct mlx4_priv *priv = mlx4_priv(dev);
 
+        if (port < 1 || port > dev->caps.num_ports)
+                return -EINVAL;
+
         s_steer = &mlx4_priv(dev)->steer[port - 1];
 
         mutex_lock(&priv->mcg_table.mutex);
@@ -484,6 +504,9 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
         int loc, i;
         int err;
 
+        if (port < 1 || port > dev->caps.num_ports)
+                return -EINVAL;
+
         s_steer = &mlx4_priv(dev)->steer[port - 1];
         mutex_lock(&priv->mcg_table.mutex);
 
@@ -910,6 +933,9 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
         u8 port = gid[5];
         u8 new_entry = 0;
 
+        if (port < 1 || port > dev->caps.num_ports)
+                return -EINVAL;
+
         mailbox = mlx4_alloc_cmd_mailbox(dev);
         if (IS_ERR(mailbox))
                 return PTR_ERR(mailbox);
@@ -255,6 +255,8 @@ struct mlx4_en_tx_ring {
         u16 poll_cnt;
         struct mlx4_en_tx_info *tx_info;
         u8 *bounce_buf;
+        u8 queue_index;
+        cpumask_t affinity_mask;
         u32 last_nr_txbb;
         struct mlx4_qp qp;
         struct mlx4_qp_context context;
@@ -719,7 +721,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
 
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
                            struct mlx4_en_tx_ring **pring,
-                           int qpn, u32 size, u16 stride, int node);
+                           int qpn, u32 size, u16 stride,
+                           int node, int queue_index);
 void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
                              struct mlx4_en_tx_ring **pring);
 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
@@ -741,6 +744,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
                           struct mlx4_en_cq *cq,
                           int budget);
 int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
+int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget);
 void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
                              int is_tx, int rss, int qpn, int cqn, int user_prio,
                              struct mlx4_qp_context *context);
@@ -454,6 +454,7 @@ struct mlx4_caps {
         u32                     userspace_caps; /* userspace must be aware of these */
         u32                     function_caps; /* VFs must be aware of these */
         u16                     hca_core_clock;
+        u64                     phys_port_id[MLX4_MAX_PORTS + 1];
 };
 
 struct mlx4_buf_list {
@@ -1113,6 +1114,7 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
                    int *vector);
 void mlx4_release_eq(struct mlx4_dev *dev, int vec);
 
+int mlx4_get_phys_port_id(struct mlx4_dev *dev);
 int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
 int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
 