Merge branch 'hns3-fixes'
Guangbin Huang says:

====================
net: hns3: add some fixes for -net

This series adds some fixes for the HNS3 ethernet driver.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 2eca426d37
@@ -537,6 +537,8 @@ struct hnae3_ae_dev {
  *	Get 1588 rx hwstamp
  * get_ts_info
  *	Get phc info
+ * clean_vf_config
+ *	Clean residual vf info after disable sriov
  */
 struct hnae3_ae_ops {
 	int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);

@@ -730,6 +732,7 @@ struct hnae3_ae_ops {
 			    struct ethtool_ts_info *info);
 	int (*get_link_diagnosis_info)(struct hnae3_handle *handle,
 				       u32 *status_code);
+	void (*clean_vf_config)(struct hnae3_ae_dev *ae_dev, int num_vfs);
 };
 
 struct hnae3_dcb_ops {

@@ -1028,46 +1028,56 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
 
 static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
 {
-	u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
 	struct hns3_tx_spare *tx_spare;
 	struct page *page;
+	u32 alloc_size;
 	dma_addr_t dma;
 	int order;
 
+	alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
 	if (!alloc_size)
 		return;
 
 	order = get_order(alloc_size);
+	if (order >= MAX_ORDER) {
+		if (net_ratelimit())
+			dev_warn(ring_to_dev(ring), "failed to allocate tx spare buffer, exceed to max order\n");
+		return;
+	}
+
 	tx_spare = devm_kzalloc(ring_to_dev(ring), sizeof(*tx_spare),
 				GFP_KERNEL);
 	if (!tx_spare) {
 		/* The driver still work without the tx spare buffer */
 		dev_warn(ring_to_dev(ring), "failed to allocate hns3_tx_spare\n");
-		return;
+		goto devm_kzalloc_error;
 	}
 
 	page = alloc_pages_node(dev_to_node(ring_to_dev(ring)),
 				GFP_KERNEL, order);
 	if (!page) {
 		dev_warn(ring_to_dev(ring), "failed to allocate tx spare pages\n");
-		devm_kfree(ring_to_dev(ring), tx_spare);
-		return;
+		goto alloc_pages_error;
 	}
 
 	dma = dma_map_page(ring_to_dev(ring), page, 0,
 			   PAGE_SIZE << order, DMA_TO_DEVICE);
 	if (dma_mapping_error(ring_to_dev(ring), dma)) {
 		dev_warn(ring_to_dev(ring), "failed to map pages for tx spare\n");
-		put_page(page);
-		devm_kfree(ring_to_dev(ring), tx_spare);
-		return;
+		goto dma_mapping_error;
 	}
 
 	tx_spare->dma = dma;
 	tx_spare->buf = page_address(page);
 	tx_spare->len = PAGE_SIZE << order;
 	ring->tx_spare = tx_spare;
+	return;
+
+dma_mapping_error:
+	put_page(page);
+alloc_pages_error:
+	devm_kfree(ring_to_dev(ring), tx_spare);
+devm_kzalloc_error:
+	ring->tqp->handle->kinfo.tx_spare_buf_size = 0;
 }
 
 /* Use hns3_tx_spare_space() to make sure there is enough buffer

@@ -3050,6 +3060,21 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	return ret;
 }
 
+/**
+ * hns3_clean_vf_config
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of VFs allocated
+ *
+ * Clean residual vf config after disable sriov
+ **/
+static void hns3_clean_vf_config(struct pci_dev *pdev, int num_vfs)
+{
+	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+	if (ae_dev->ops->clean_vf_config)
+		ae_dev->ops->clean_vf_config(ae_dev, num_vfs);
+}
+
 /* hns3_remove - Device removal routine
  * @pdev: PCI device information struct
  */

@@ -3088,7 +3113,10 @@ static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
 		else
 			return num_vfs;
 	} else if (!pci_vfs_assigned(pdev)) {
+		int num_vfs_pre = pci_num_vf(pdev);
+
 		pci_disable_sriov(pdev);
+		hns3_clean_vf_config(pdev, num_vfs_pre);
 	} else {
 		dev_warn(&pdev->dev,
 			 "Unable to free VFs because some are assigned to VMs.\n");

@@ -653,8 +653,8 @@ static void hns3_get_ringparam(struct net_device *netdev,
 	struct hnae3_handle *h = priv->ae_handle;
 	int rx_queue_index = h->kinfo.num_tqps;
 
-	if (hns3_nic_resetting(netdev)) {
-		netdev_err(netdev, "dev resetting!");
+	if (hns3_nic_resetting(netdev) || !priv->ring) {
+		netdev_err(netdev, "failed to get ringparam value, due to dev resetting or uninited\n");
 		return;
 	}
 

@@ -1074,8 +1074,14 @@ static int hns3_check_ringparam(struct net_device *ndev,
 {
 #define RX_BUF_LEN_2K 2048
 #define RX_BUF_LEN_4K 4096
-	if (hns3_nic_resetting(ndev))
+
+	struct hns3_nic_priv *priv = netdev_priv(ndev);
+
+	if (hns3_nic_resetting(ndev) || !priv->ring) {
+		netdev_err(ndev, "failed to set ringparam value, due to dev resetting or uninited\n");
 		return -EBUSY;
+	}
 
 	if (param->rx_mini_pending || param->rx_jumbo_pending)
 		return -EINVAL;

@@ -1766,9 +1772,6 @@ static int hns3_set_tx_spare_buf_size(struct net_device *netdev,
 	struct hnae3_handle *h = priv->ae_handle;
 	int ret;
 
-	if (hns3_nic_resetting(netdev))
-		return -EBUSY;
-
 	h->kinfo.tx_spare_buf_size = data;
 
 	ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);

@@ -1799,6 +1802,11 @@ static int hns3_set_tunable(struct net_device *netdev,
 	struct hnae3_handle *h = priv->ae_handle;
 	int i, ret = 0;
 
+	if (hns3_nic_resetting(netdev) || !priv->ring) {
+		netdev_err(netdev, "failed to set tunable value, dev resetting!");
+		return -EBUSY;
+	}
+
 	switch (tuna->id) {
 	case ETHTOOL_TX_COPYBREAK:
 		priv->tx_copybreak = *(u32 *)data;

@@ -1818,7 +1826,8 @@ static int hns3_set_tunable(struct net_device *netdev,
 		old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size;
 		new_tx_spare_buf_size = *(u32 *)data;
 		ret = hns3_set_tx_spare_buf_size(netdev, new_tx_spare_buf_size);
-		if (ret) {
+		if (ret ||
+		    (!priv->ring->tx_spare && new_tx_spare_buf_size != 0)) {
 			int ret1;
 
 			netdev_warn(netdev,

@@ -12724,6 +12724,55 @@ static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
 	return 0;
 }
 
+/* After disable sriov, VF still has some config and info need clean,
+ * which configed by PF.
+ */
+static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid)
+{
+	struct hclge_dev *hdev = vport->back;
+	struct hclge_vlan_info vlan_info;
+	int ret;
+
+	/* after disable sriov, clean VF rate configured by PF */
+	ret = hclge_tm_qs_shaper_cfg(vport, 0);
+	if (ret)
+		dev_err(&hdev->pdev->dev,
+			"failed to clean vf%d rate config, ret = %d\n",
+			vfid, ret);
+
+	vlan_info.vlan_tag = 0;
+	vlan_info.qos = 0;
+	vlan_info.vlan_proto = ETH_P_8021Q;
+	ret = hclge_update_port_base_vlan_cfg(vport,
+					      HNAE3_PORT_BASE_VLAN_DISABLE,
+					      &vlan_info);
+	if (ret)
+		dev_err(&hdev->pdev->dev,
+			"failed to clean vf%d port base vlan, ret = %d\n",
+			vfid, ret);
+
+	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false);
+	if (ret)
+		dev_err(&hdev->pdev->dev,
+			"failed to clean vf%d spoof config, ret = %d\n",
+			vfid, ret);
+
+	memset(&vport->vf_info, 0, sizeof(vport->vf_info));
+}
+
+static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs)
+{
+	struct hclge_dev *hdev = ae_dev->priv;
+	struct hclge_vport *vport;
+	int i;
+
+	for (i = 0; i < num_vfs; i++) {
+		vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
+
+		hclge_clear_vport_vf_info(vport, i);
+	}
+}
+
 static const struct hnae3_ae_ops hclge_ops = {
 	.init_ae_dev = hclge_init_ae_dev,
 	.uninit_ae_dev = hclge_uninit_ae_dev,

@@ -12825,6 +12874,7 @@ static const struct hnae3_ae_ops hclge_ops = {
 	.get_rx_hwts = hclge_ptp_get_rx_hwts,
 	.get_ts_info = hclge_ptp_get_ts_info,
 	.get_link_diagnosis_info = hclge_get_link_diagnosis_info,
+	.clean_vf_config = hclge_clean_vport_config,
 };
 
 static struct hnae3_ae_algo ae_algo = {

@@ -48,7 +48,7 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
 	int ret;
 
 	if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
-		return 0;
+		return -EBUSY;
 
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, false);
 

@@ -86,7 +86,7 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
 	int ret;
 
 	if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
-		return 0;
+		return -EBUSY;
 
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, true);
 