qede: preserve per queue stats across up/down of interface
Here we initialize the coalescing values on load. Per-queue coalesce values are also restored across an up/down of the ethernet interface.

Signed-off-by: Bhaskar Upadhaya <bupadhaya@marvell.com>
Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
Signed-off-by: Ariel Elior <aelior@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit b0ec5489c4
parent a0d2d97d74
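For orientation before the diff: the patch caches the last requested per-queue coalesce parameters in a side array (struct qede_coalesce entries in edev->coal_entry) and replays them after the queues are rebuilt on interface up. The standalone C sketch below illustrates only that save-and-replay pattern; the queue count, default values, and helper names are illustrative stand-ins, not the driver's actual code.

/* Minimal, self-contained sketch of the save-and-replay pattern
 * (illustrative only; names and default values are hypothetical).
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NUM_QUEUES       4   /* stand-in for QEDE_QUEUE_CNT()     */
#define DEFAULT_RX_USECS 12  /* stand-in for QED_DEFAULT_RX_USECS */
#define DEFAULT_TX_USECS 48  /* stand-in for QED_DEFAULT_TX_USECS */

/* Mirrors the idea of struct qede_coalesce: remember what the user asked for. */
struct coal_entry {
        bool valid;
        unsigned short rxc;
        unsigned short txc;
};

/* Stand-in for a hardware queue whose settings are lost on down/up. */
struct queue {
        unsigned short rx_usecs;
        unsigned short tx_usecs;
};

static struct coal_entry coal[NUM_QUEUES]; /* survives down/up */
static struct queue queues[NUM_QUEUES];    /* reset on down/up  */

/* Per-queue "set coalesce": program the queue and cache the values. */
static void set_per_coalesce(int q, unsigned short rxc, unsigned short txc)
{
        queues[q].rx_usecs = rxc;
        queues[q].tx_usecs = txc;
        coal[q].rxc = rxc;
        coal[q].txc = txc;
        coal[q].valid = true;
}

/* Interface down: queue state is lost, the cache is kept. */
static void ifdown(void)
{
        memset(queues, 0, sizeof(queues));
}

/* Interface up: replay cached values where valid, otherwise apply defaults. */
static void ifup(void)
{
        for (int i = 0; i < NUM_QUEUES; i++) {
                unsigned short rxc = DEFAULT_RX_USECS;
                unsigned short txc = DEFAULT_TX_USECS;

                if (coal[i].valid) {
                        rxc = coal[i].rxc;
                        txc = coal[i].txc;
                }
                set_per_coalesce(i, rxc, txc);
        }
}

int main(void)
{
        set_per_coalesce(2, 64, 128);  /* user tunes queue 2 */
        ifdown();
        ifup();                        /* queue 2 keeps 64/128, others get defaults */

        for (int i = 0; i < NUM_QUEUES; i++)
                printf("queue %d: rx %hu tx %hu usecs\n",
                       i, queues[i].rx_usecs, queues[i].tx_usecs);
        return 0;
}

Running the sketch prints the default values for queues 0, 1 and 3 and the user's 64/128 for queue 2, which is the behaviour the patch gives qede across an ifdown/ifup cycle.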
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -168,6 +168,12 @@ struct qede_dump_info {
         u32 args[QEDE_DUMP_MAX_ARGS];
 };
 
+struct qede_coalesce {
+        bool isvalid;
+        u16 rxc;
+        u16 txc;
+};
+
 struct qede_dev {
         struct qed_dev *cdev;
         struct net_device *ndev;
@@ -194,6 +200,7 @@ struct qede_dev {
         ((edev)->dev_info.common.dev_type == QED_DEV_TYPE_AH)
 
         struct qede_fastpath *fp_array;
+        struct qede_coalesce *coal_entry;
         u8 req_num_tx;
         u8 fp_num_tx;
         u8 req_num_rx;
@@ -581,6 +588,9 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
                             struct flow_cls_offload *f);
 
 void qede_forced_speed_maps_init(void);
+int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal);
+int qede_set_per_coalesce(struct net_device *dev, u32 queue,
+                          struct ethtool_coalesce *coal);
 
 #define RX_RING_SIZE_POW 13
 #define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -819,8 +819,7 @@ out:
         return rc;
 }
 
-static int qede_set_coalesce(struct net_device *dev,
-                             struct ethtool_coalesce *coal)
+int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
 {
         struct qede_dev *edev = netdev_priv(dev);
         struct qede_fastpath *fp;
@@ -855,6 +854,8 @@ static int qede_set_coalesce(struct net_device *dev,
                                            "Set RX coalesce error, rc = %d\n", rc);
                                 return rc;
                         }
+                        edev->coal_entry[i].rxc = rxc;
+                        edev->coal_entry[i].isvalid = true;
                 }
 
                 if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
@@ -874,6 +875,8 @@ static int qede_set_coalesce(struct net_device *dev,
                                            "Set TX coalesce error, rc = %d\n", rc);
                                 return rc;
                         }
+                        edev->coal_entry[i].txc = txc;
+                        edev->coal_entry[i].isvalid = true;
                 }
         }
 
@@ -2105,9 +2108,8 @@ err:
         return rc;
 }
 
-static int qede_set_per_coalesce(struct net_device *dev,
-                                 u32 queue,
-                                 struct ethtool_coalesce *coal)
+int qede_set_per_coalesce(struct net_device *dev, u32 queue,
+                          struct ethtool_coalesce *coal)
 {
         struct qede_dev *edev = netdev_priv(dev);
         struct qede_fastpath *fp;
@@ -2150,6 +2152,8 @@ static int qede_set_per_coalesce(struct net_device *dev,
                                    "Set RX coalesce error, rc = %d\n", rc);
                         goto out;
                 }
+                edev->coal_entry[queue].rxc = rxc;
+                edev->coal_entry[queue].isvalid = true;
         }
 
         if (edev->fp_array[queue].type & QEDE_FASTPATH_TX) {
@@ -2161,6 +2165,8 @@ static int qede_set_per_coalesce(struct net_device *dev,
                                    "Set TX coalesce error, rc = %d\n", rc);
                         goto out;
                 }
+                edev->coal_entry[queue].txc = txc;
+                edev->coal_entry[queue].isvalid = true;
         }
 out:
         __qede_unlock(edev);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -904,6 +904,7 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
 {
         u8 fp_combined, fp_rx = edev->fp_num_rx;
         struct qede_fastpath *fp;
+        void *mem;
         int i;
 
         edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
@@ -913,6 +914,15 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
                 goto err;
         }
 
+        mem = krealloc(edev->coal_entry, QEDE_QUEUE_CNT(edev) *
+                       sizeof(*edev->coal_entry), GFP_KERNEL);
+        if (!mem) {
+                DP_ERR(edev, "coalesce entry allocation failed\n");
+                kfree(edev->coal_entry);
+                goto err;
+        }
+        edev->coal_entry = mem;
+
         fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;
 
         /* Allocate the FP elements for Rx queues followed by combined and then
@@ -1320,8 +1330,10 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
          * [e.g., QED register callbacks] won't break anything when
          * accessing the netdevice.
          */
-        if (mode != QEDE_REMOVE_RECOVERY)
+        if (mode != QEDE_REMOVE_RECOVERY) {
+                kfree(edev->coal_entry);
                 free_netdev(ndev);
+        }
 
         dev_info(&pdev->dev, "Ending qede_remove successfully\n");
 }
@@ -2328,8 +2340,9 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
                      bool is_locked)
 {
         struct qed_link_params link_params;
+        struct ethtool_coalesce coal = {};
         u8 num_tc;
-        int rc;
+        int rc, i;
 
         DP_INFO(edev, "Starting qede load\n");
 
@@ -2390,6 +2403,18 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
         edev->state = QEDE_STATE_OPEN;
 
+        coal.rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
+        coal.tx_coalesce_usecs = QED_DEFAULT_TX_USECS;
+
+        for_each_queue(i) {
+                if (edev->coal_entry[i].isvalid) {
+                        coal.rx_coalesce_usecs = edev->coal_entry[i].rxc;
+                        coal.tx_coalesce_usecs = edev->coal_entry[i].txc;
+                }
+                __qede_unlock(edev);
+                qede_set_per_coalesce(edev->ndev, i, &coal);
+                __qede_lock(edev);
+        }
         DP_INFO(edev, "Ending successfully qede load\n");
 
         goto out;
 