Merge tag 'net-5.18-rc0' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Including fixes from netfilter.

  Current release - regressions:

   - llc: only change llc->dev when bind() succeeds, fix null-deref

  Current release - new code bugs:

   - smc: fix a memory leak in smc_sysctl_net_exit()

   - dsa: realtek: make interface drivers depend on OF

  Previous releases - regressions:

   - sched: act_ct: fix ref leak when switching zones

  Previous releases - always broken:

   - netfilter: egress: report interface as outgoing

   - vsock/virtio: enable VQs early on probe and finish the setup before
     using them

  Misc:

   - memcg: enable accounting for nft objects"

* tag 'net-5.18-rc0' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (39 commits)
  Revert "selftests: net: Add tls config dependency for tls selftests"
  net/smc: Send out the remaining data in sndbuf before close
  net: move net_unlink_todo() out of the header
  net: dsa: bcm_sf2_cfp: fix an incorrect NULL check on list iterator
  net: bnxt_ptp: fix compilation error
  selftests: net: Add tls config dependency for tls selftests
  memcg: enable accounting for nft objects
  net/sched: act_ct: fix ref leak when switching zones
  net/smc: fix a memory leak in smc_sysctl_net_exit()
  selftests: tls: skip cmsg_to_pipe tests with TLS=n
  octeontx2-af: initialize action variable
  net: sparx5: switchdev: fix possible NULL pointer dereference
  net/x25: Fix null-ptr-deref caused by x25_disconnect
  qlcnic: dcb: default to returning -EOPNOTSUPP
  net: sparx5: depends on PTP_1588_CLOCK_OPTIONAL
  net: hns3: fix phy can not link up when autoneg off and reset
  net: hns3: add NULL pointer check for hns3_set/get_ringparam()
  net: hns3: add netdev reset check for hns3_set_tunable()
  net: hns3: clean residual vf config after disable sriov
  net: hns3: add max order judgement for tx spare buffer
  ...
commit d717e4cae0 by Linus Torvalds, 2022-03-28 17:02:04 -07:00
38 changed files with 465 additions and 195 deletions


@@ -567,14 +567,14 @@ static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
 static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv,
 					      int port, u32 location)
 {
-	struct cfp_rule *rule = NULL;
+	struct cfp_rule *rule;
 
 	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
 		if (rule->port == port && rule->fs.location == location)
-			break;
+			return rule;
 	}
 
-	return rule;
+	return NULL;
 }
 
 static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
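The hunk above is the classic list_for_each_entry() pitfall: when the loop runs to completion, the cursor ends up as container_of() of the list head itself, a bogus non-NULL pointer, so a match must be returned from inside the loop and NULL after it. A standalone C sketch of the pattern, with hand-rolled stand-ins for the kernel's <linux/list.h> macros (all names here are illustrative):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_for_each_entry(pos, head, type, member) \
	for (pos = container_of((head)->next, type, member); \
	     &pos->member != (head); \
	     pos = container_of(pos->member.next, type, member))

struct rule { int port; struct list_head node; };

static struct rule *find_rule(struct list_head *head, int port)
{
	struct rule *r;

	list_for_each_entry(r, head, struct rule, node) {
		if (r->port == port)
			return r;	/* iterator is only meaningful inside the loop */
	}
	/* after a full pass, r is container_of(head): bogus but never NULL */
	return NULL;
}

int main(void)
{
	struct list_head head = { &head, &head };	/* empty circular list */
	struct rule one = { .port = 1 };

	/* link a single entry in by hand */
	one.node.next = &head; one.node.prev = &head;
	head.next = &one.node; head.prev = &one.node;

	printf("port 1: %p\n", (void *)find_rule(&head, 1));	/* the entry */
	printf("port 7: %p\n", (void *)find_rule(&head, 7));	/* NULL */
	return 0;
}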


@@ -12,6 +12,7 @@ menuconfig NET_DSA_REALTEK
 config NET_DSA_REALTEK_MDIO
 	tristate "Realtek MDIO connected switch driver"
 	depends on NET_DSA_REALTEK
+	depends on OF
 	help
 	  Select to enable support for registering switches configured
 	  through MDIO.
@@ -19,6 +20,7 @@ config NET_DSA_REALTEK_MDIO
 config NET_DSA_REALTEK_SMI
 	tristate "Realtek SMI connected switch driver"
 	depends on NET_DSA_REALTEK
+	depends on OF
 	help
 	  Select to enable support for registering switches connected
 	  through SMI.


@@ -382,7 +382,7 @@ static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info,
 	struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
 						ptp_info);
 	struct bnxt *bp = ptp->bp;
-	u8 pin_id;
+	int pin_id;
 	int rc;
 
 	switch (rq->type) {
@@ -390,6 +390,8 @@ static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info,
 		/* Configure an External PPS IN */
 		pin_id = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS,
 				      rq->extts.index);
+		if (!TSIO_PIN_VALID(pin_id))
+			return -EOPNOTSUPP;
 		if (!on)
 			break;
 		rc = bnxt_ptp_cfg_pin(bp, pin_id, BNXT_PPS_PIN_PPS_IN);
@@ -403,6 +405,8 @@ static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info,
 		/* Configure a Periodic PPS OUT */
 		pin_id = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT,
 				      rq->perout.index);
+		if (!TSIO_PIN_VALID(pin_id))
+			return -EOPNOTSUPP;
 		if (!on)
 			break;


@@ -31,7 +31,7 @@ struct pps_pin {
 	u8 state;
 };
 
-#define TSIO_PIN_VALID(pin) ((pin) < (BNXT_MAX_TSIO_PINS))
+#define TSIO_PIN_VALID(pin) ((pin) >= 0 && (pin) < (BNXT_MAX_TSIO_PINS))
 
 #define EVENT_DATA2_PPS_EVENT_TYPE(data2)				\
 	((data2) & ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE)
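The lower bound added to TSIO_PIN_VALID() goes hand in hand with the u8-to-int change in the previous hunk: ptp_find_pin() reports failure as a negative errno, which an unsigned variable silently wraps and an upper-bound-only check lets through. A minimal standalone C sketch of the failure mode (find_pin() is a made-up stand-in for the lookup):

#include <stdio.h>

#define MAX_TSIO_PINS 4
#define PIN_VALID_OLD(pin) ((pin) < MAX_TSIO_PINS)               /* no lower bound */
#define PIN_VALID_NEW(pin) ((pin) >= 0 && (pin) < MAX_TSIO_PINS)

static int find_pin(int want)
{
	return want < MAX_TSIO_PINS ? want : -22;	/* -EINVAL on failure */
}

int main(void)
{
	int pin = find_pin(99);	/* lookup fails: pin == -22 */

	printf("old check: %s\n", PIN_VALID_OLD(pin) ? "valid (bug!)" : "invalid");
	printf("new check: %s\n", PIN_VALID_NEW(pin) ? "valid" : "invalid");
	return 0;
}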


@@ -674,7 +674,10 @@ static int enetc_get_ts_info(struct net_device *ndev,
 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
 	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
 				SOF_TIMESTAMPING_RX_HARDWARE |
-				SOF_TIMESTAMPING_RAW_HARDWARE;
+				SOF_TIMESTAMPING_RAW_HARDWARE |
+				SOF_TIMESTAMPING_TX_SOFTWARE |
+				SOF_TIMESTAMPING_RX_SOFTWARE |
+				SOF_TIMESTAMPING_SOFTWARE;
 
 	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
 			 (1 << HWTSTAMP_TX_ON) |


@@ -537,6 +537,8 @@ struct hnae3_ae_dev {
  *   Get 1588 rx hwstamp
  * get_ts_info
  *   Get phc info
+ * clean_vf_config
+ *   Clean residual vf info after disable sriov
  */
 struct hnae3_ae_ops {
 	int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -730,6 +732,7 @@ struct hnae3_ae_ops {
 			     struct ethtool_ts_info *info);
 	int (*get_link_diagnosis_info)(struct hnae3_handle *handle,
 				       u32 *status_code);
+	void (*clean_vf_config)(struct hnae3_ae_dev *ae_dev, int num_vfs);
 };
 
 struct hnae3_dcb_ops {


@@ -1028,46 +1028,56 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
 static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
 {
-	u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
 	struct hns3_tx_spare *tx_spare;
 	struct page *page;
+	u32 alloc_size;
 	dma_addr_t dma;
 	int order;
 
+	alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
 	if (!alloc_size)
 		return;
 
 	order = get_order(alloc_size);
+	if (order >= MAX_ORDER) {
+		if (net_ratelimit())
+			dev_warn(ring_to_dev(ring), "failed to allocate tx spare buffer, exceed to max order\n");
+		return;
+	}
+
 	tx_spare = devm_kzalloc(ring_to_dev(ring), sizeof(*tx_spare),
 				GFP_KERNEL);
 	if (!tx_spare) {
 		/* The driver still work without the tx spare buffer */
 		dev_warn(ring_to_dev(ring), "failed to allocate hns3_tx_spare\n");
-		return;
+		goto devm_kzalloc_error;
 	}
 
 	page = alloc_pages_node(dev_to_node(ring_to_dev(ring)),
 				GFP_KERNEL, order);
 	if (!page) {
 		dev_warn(ring_to_dev(ring), "failed to allocate tx spare pages\n");
-		devm_kfree(ring_to_dev(ring), tx_spare);
-		return;
+		goto alloc_pages_error;
 	}
 
 	dma = dma_map_page(ring_to_dev(ring), page, 0,
 			   PAGE_SIZE << order, DMA_TO_DEVICE);
 	if (dma_mapping_error(ring_to_dev(ring), dma)) {
 		dev_warn(ring_to_dev(ring), "failed to map pages for tx spare\n");
-		put_page(page);
-		devm_kfree(ring_to_dev(ring), tx_spare);
-		return;
+		goto dma_mapping_error;
 	}
 
 	tx_spare->dma = dma;
 	tx_spare->buf = page_address(page);
 	tx_spare->len = PAGE_SIZE << order;
 	ring->tx_spare = tx_spare;
+	return;
+
+dma_mapping_error:
+	put_page(page);
+alloc_pages_error:
+	devm_kfree(ring_to_dev(ring), tx_spare);
+devm_kzalloc_error:
+	ring->tqp->handle->kinfo.tx_spare_buf_size = 0;
 }
 
 /* Use hns3_tx_spare_space() to make sure there is enough buffer
@@ -3050,6 +3060,21 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	return ret;
 }
 
+/**
+ * hns3_clean_vf_config
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of VFs allocated
+ *
+ * Clean residual vf config after disable sriov
+ **/
+static void hns3_clean_vf_config(struct pci_dev *pdev, int num_vfs)
+{
+	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+	if (ae_dev->ops->clean_vf_config)
+		ae_dev->ops->clean_vf_config(ae_dev, num_vfs);
+}
+
 /* hns3_remove - Device removal routine
  * @pdev: PCI device information struct
  */
@@ -3088,7 +3113,10 @@ static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
 		else
 			return num_vfs;
 	} else if (!pci_vfs_assigned(pdev)) {
+		int num_vfs_pre = pci_num_vf(pdev);
+
 		pci_disable_sriov(pdev);
+		hns3_clean_vf_config(pdev, num_vfs_pre);
 	} else {
 		dev_warn(&pdev->dev,
 			 "Unable to free VFs because some are assigned to VMs.\n");


@@ -653,8 +653,8 @@ static void hns3_get_ringparam(struct net_device *netdev,
 	struct hnae3_handle *h = priv->ae_handle;
 	int rx_queue_index = h->kinfo.num_tqps;
 
-	if (hns3_nic_resetting(netdev)) {
-		netdev_err(netdev, "dev resetting!");
+	if (hns3_nic_resetting(netdev) || !priv->ring) {
+		netdev_err(netdev, "failed to get ringparam value, due to dev resetting or uninited\n");
 		return;
 	}
@@ -1074,8 +1074,14 @@ static int hns3_check_ringparam(struct net_device *ndev,
 {
 #define RX_BUF_LEN_2K 2048
 #define RX_BUF_LEN_4K 4096
-	if (hns3_nic_resetting(ndev))
+
+	struct hns3_nic_priv *priv = netdev_priv(ndev);
+
+	if (hns3_nic_resetting(ndev) || !priv->ring) {
+		netdev_err(ndev, "failed to set ringparam value, due to dev resetting or uninited\n");
 		return -EBUSY;
+	}
 
 	if (param->rx_mini_pending || param->rx_jumbo_pending)
 		return -EINVAL;
@@ -1766,9 +1772,6 @@ static int hns3_set_tx_spare_buf_size(struct net_device *netdev,
 	struct hnae3_handle *h = priv->ae_handle;
 	int ret;
 
-	if (hns3_nic_resetting(netdev))
-		return -EBUSY;
-
 	h->kinfo.tx_spare_buf_size = data;
 
 	ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
@@ -1799,6 +1802,11 @@ static int hns3_set_tunable(struct net_device *netdev,
 	struct hnae3_handle *h = priv->ae_handle;
 	int i, ret = 0;
 
+	if (hns3_nic_resetting(netdev) || !priv->ring) {
+		netdev_err(netdev, "failed to set tunable value, dev resetting!");
+		return -EBUSY;
+	}
+
 	switch (tuna->id) {
 	case ETHTOOL_TX_COPYBREAK:
 		priv->tx_copybreak = *(u32 *)data;
@@ -1818,7 +1826,8 @@ static int hns3_set_tunable(struct net_device *netdev,
 		old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size;
 		new_tx_spare_buf_size = *(u32 *)data;
 		ret = hns3_set_tx_spare_buf_size(netdev, new_tx_spare_buf_size);
-		if (ret) {
+		if (ret ||
+		    (!priv->ring->tx_spare && new_tx_spare_buf_size != 0)) {
 			int ret1;
 
 			netdev_warn(netdev,


@ -1872,6 +1872,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
vport->mps = HCLGE_MAC_DEFAULT_FRAME;
vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
vport->port_base_vlan_cfg.tbl_sta = true;
vport->rxvlan_cfg.rx_vlan_offload_en = true;
vport->req_vlan_fltr_en = true;
INIT_LIST_HEAD(&vport->vlan_list);
@ -8438,12 +8439,11 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr, false);
ret = hclge_remove_mac_vlan_tbl(vport, &req);
if (!ret) {
if (!ret || ret == -ENOENT) {
mutex_lock(&hdev->vport_lock);
hclge_update_umv_space(vport, true);
mutex_unlock(&hdev->vport_lock);
} else if (ret == -ENOENT) {
ret = 0;
return 0;
}
return ret;
@ -8993,11 +8993,16 @@ static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
ether_addr_copy(vport->vf_info.mac, mac_addr);
/* there is a timewindow for PF to know VF unalive, it may
* cause send mailbox fail, but it doesn't matter, VF will
* query it when reinit.
*/
if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
dev_info(&hdev->pdev->dev,
"MAC of VF %d has been set to %s, and it will be reinitialized!\n",
vf, format_mac_addr);
return hclge_inform_reset_assert_to_vf(vport);
(void)hclge_inform_reset_assert_to_vf(vport);
return 0;
}
dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %s\n",
@ -9818,19 +9823,28 @@ static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
bool writen_to_tbl)
{
struct hclge_vport_vlan_cfg *vlan, *tmp;
struct hclge_dev *hdev = vport->back;
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
if (vlan->vlan_id == vlan_id)
mutex_lock(&hdev->vport_lock);
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
if (vlan->vlan_id == vlan_id) {
mutex_unlock(&hdev->vport_lock);
return;
}
}
vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
if (!vlan)
if (!vlan) {
mutex_unlock(&hdev->vport_lock);
return;
}
vlan->hd_tbl_status = writen_to_tbl;
vlan->vlan_id = vlan_id;
list_add_tail(&vlan->node, &vport->vlan_list);
mutex_unlock(&hdev->vport_lock);
}
static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
@ -9839,6 +9853,8 @@ static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
struct hclge_dev *hdev = vport->back;
int ret;
mutex_lock(&hdev->vport_lock);
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
if (!vlan->hd_tbl_status) {
ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
@ -9848,12 +9864,16 @@ static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
dev_err(&hdev->pdev->dev,
"restore vport vlan list failed, ret=%d\n",
ret);
mutex_unlock(&hdev->vport_lock);
return ret;
}
}
vlan->hd_tbl_status = true;
}
mutex_unlock(&hdev->vport_lock);
return 0;
}
@ -9863,6 +9883,8 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
struct hclge_vport_vlan_cfg *vlan, *tmp;
struct hclge_dev *hdev = vport->back;
mutex_lock(&hdev->vport_lock);
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
if (vlan->vlan_id == vlan_id) {
if (is_write_tbl && vlan->hd_tbl_status)
@ -9877,6 +9899,8 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
break;
}
}
mutex_unlock(&hdev->vport_lock);
}
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
@ -9884,6 +9908,8 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
struct hclge_vport_vlan_cfg *vlan, *tmp;
struct hclge_dev *hdev = vport->back;
mutex_lock(&hdev->vport_lock);
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
if (vlan->hd_tbl_status)
hclge_set_vlan_filter_hw(hdev,
@ -9899,6 +9925,7 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
}
}
clear_bit(vport->vport_id, hdev->vf_vlan_full);
mutex_unlock(&hdev->vport_lock);
}
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
@ -9907,6 +9934,8 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
struct hclge_vport *vport;
int i;
mutex_lock(&hdev->vport_lock);
for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
@ -9914,37 +9943,61 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
kfree(vlan);
}
}
mutex_unlock(&hdev->vport_lock);
}
void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev)
{
struct hclge_vlan_info *vlan_info;
struct hclge_vport *vport;
u16 vlan_proto;
u16 vlan_id;
u16 state;
int vf_id;
int ret;
/* PF should restore all vfs port base vlan */
for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) {
vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM];
vlan_info = vport->port_base_vlan_cfg.tbl_sta ?
&vport->port_base_vlan_cfg.vlan_info :
&vport->port_base_vlan_cfg.old_vlan_info;
vlan_id = vlan_info->vlan_tag;
vlan_proto = vlan_info->vlan_proto;
state = vport->port_base_vlan_cfg.state;
if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
vport->vport_id,
vlan_id, false);
vport->port_base_vlan_cfg.tbl_sta = ret == 0;
}
}
}
void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
{
struct hclge_vport_vlan_cfg *vlan, *tmp;
struct hclge_dev *hdev = vport->back;
u16 vlan_proto;
u16 vlan_id;
u16 state;
int ret;
vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
state = vport->port_base_vlan_cfg.state;
mutex_lock(&hdev->vport_lock);
if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
vport->vport_id, vlan_id,
false);
return;
if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
vport->vport_id,
vlan->vlan_id, false);
if (ret)
break;
vlan->hd_tbl_status = true;
}
}
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
vport->vport_id,
vlan->vlan_id, false);
if (ret)
break;
vlan->hd_tbl_status = true;
}
mutex_unlock(&hdev->vport_lock);
}
/* For global reset and imp reset, hardware will clear the mac table,
@ -9984,6 +10037,7 @@ static void hclge_restore_hw_table(struct hclge_dev *hdev)
struct hnae3_handle *handle = &vport->nic;
hclge_restore_mac_table_common(vport);
hclge_restore_vport_port_base_vlan_config(hdev);
hclge_restore_vport_vlan_table(vport);
set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
hclge_restore_fd_entries(handle);
@ -10040,6 +10094,8 @@ static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
false);
}
vport->port_base_vlan_cfg.tbl_sta = false;
/* force add VLAN 0 */
ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
if (ret)
@ -10129,7 +10185,9 @@ out:
else
nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
vport->port_base_vlan_cfg.old_vlan_info = *old_vlan_info;
vport->port_base_vlan_cfg.vlan_info = *vlan_info;
vport->port_base_vlan_cfg.tbl_sta = true;
hclge_set_vport_vlan_fltr_change(vport);
return 0;
@ -10197,14 +10255,17 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
return ret;
}
/* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
/* there is a timewindow for PF to know VF unalive, it may
* cause send mailbox fail, but it doesn't matter, VF will
* query it when reinit.
* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
* VLAN state.
*/
if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
vport->vport_id, state,
&vlan_info);
(void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
vport->vport_id,
state, &vlan_info);
return 0;
}
@ -11838,8 +11899,8 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_misc_irq_uninit(hdev);
hclge_devlink_uninit(hdev);
hclge_pci_uninit(hdev);
mutex_destroy(&hdev->vport_lock);
hclge_uninit_vport_vlan_table(hdev);
mutex_destroy(&hdev->vport_lock);
ae_dev->priv = NULL;
}
@ -12663,6 +12724,55 @@ static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
return 0;
}
/* After disable sriov, VF still has some config and info need clean,
* which configed by PF.
*/
static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid)
{
struct hclge_dev *hdev = vport->back;
struct hclge_vlan_info vlan_info;
int ret;
/* after disable sriov, clean VF rate configured by PF */
ret = hclge_tm_qs_shaper_cfg(vport, 0);
if (ret)
dev_err(&hdev->pdev->dev,
"failed to clean vf%d rate config, ret = %d\n",
vfid, ret);
vlan_info.vlan_tag = 0;
vlan_info.qos = 0;
vlan_info.vlan_proto = ETH_P_8021Q;
ret = hclge_update_port_base_vlan_cfg(vport,
HNAE3_PORT_BASE_VLAN_DISABLE,
&vlan_info);
if (ret)
dev_err(&hdev->pdev->dev,
"failed to clean vf%d port base vlan, ret = %d\n",
vfid, ret);
ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false);
if (ret)
dev_err(&hdev->pdev->dev,
"failed to clean vf%d spoof config, ret = %d\n",
vfid, ret);
memset(&vport->vf_info, 0, sizeof(vport->vf_info));
}
static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs)
{
struct hclge_dev *hdev = ae_dev->priv;
struct hclge_vport *vport;
int i;
for (i = 0; i < num_vfs; i++) {
vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
hclge_clear_vport_vf_info(vport, i);
}
}
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
@ -12764,6 +12874,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_rx_hwts = hclge_ptp_get_rx_hwts,
.get_ts_info = hclge_ptp_get_ts_info,
.get_link_diagnosis_info = hclge_get_link_diagnosis_info,
.clean_vf_config = hclge_clean_vport_config,
};
static struct hnae3_ae_algo ae_algo = {


@ -985,7 +985,9 @@ struct hclge_vlan_info {
struct hclge_port_base_vlan_config {
u16 state;
bool tbl_sta;
struct hclge_vlan_info vlan_info;
struct hclge_vlan_info old_vlan_info;
};
struct hclge_vf_info {
@ -1031,6 +1033,7 @@ struct hclge_vport {
spinlock_t mac_list_lock; /* protect mac address need to add/detele */
struct list_head uc_mac_list; /* Store VF unicast table */
struct list_head mc_mac_list; /* Store VF multicast table */
struct list_head vlan_list; /* Store VF vlan table */
};
@ -1100,6 +1103,7 @@ void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list);
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev);
void hclge_restore_mac_table_common(struct hclge_vport *vport);
void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev);
void hclge_restore_vport_vlan_table(struct hclge_vport *vport);
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
struct hclge_vlan_info *vlan_info);


@@ -48,7 +48,7 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
 	int ret;
 
 	if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
-		return 0;
+		return -EBUSY;
 
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, false);
@@ -86,7 +86,7 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
 	int ret;
 
 	if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
-		return 0;
+		return -EBUSY;
 
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, true);
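Returning 0 from an MDIO accessor whose command queue is disabled told the MII layer the transfer succeeded, so callers went on to consume a value that was never read from the wire; returning a negative errno lets them back off. A small standalone C sketch of the difference (mdio_read() and bus_dead are stand-ins):

#include <errno.h>
#include <stdio.h>

static int bus_dead = 1;	/* models the CMD_DISABLE state */

static int mdio_read(int reg, unsigned int *val)
{
	(void)reg;
	if (bus_dead)
		return -EBUSY;	/* old code returned 0 here: "success", stale *val */
	*val = 0x1234;		/* pretend hardware answered */
	return 0;
}

int main(void)
{
	unsigned int val = 0xdead;	/* whatever was left in the buffer */
	int ret = mdio_read(1, &val);

	if (ret)
		printf("read failed (%d), val not trusted\n", ret);
	else
		printf("val = 0x%x\n", val);
	return 0;
}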


@@ -2862,6 +2862,11 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
 		return ret;
 	}
 
+	/* get current port based vlan state from PF */
+	ret = hclgevf_get_port_base_vlan_filter_state(hdev);
+	if (ret)
+		return ret;
+
 	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
 
 	hclgevf_init_rxd_adv_layout(hdev);


@@ -605,7 +605,7 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
 	struct npc_install_flow_req req = { 0 };
 	struct npc_install_flow_rsp rsp = { 0 };
 	struct npc_mcam *mcam = &rvu->hw->mcam;
-	struct nix_rx_action action;
+	struct nix_rx_action action = { 0 };
 	int blkaddr, index;
 
 	/* AF's and SDP VFs work in promiscuous mode */
@@ -626,7 +626,6 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
 		*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
 						      blkaddr, index);
 	} else {
-		*(u64 *)&action = 0x00;
 		action.op = NIX_RX_ACTIONOP_UCAST;
 		action.pf_func = pcifunc;
 	}
@@ -657,7 +656,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
 	struct npc_mcam *mcam = &rvu->hw->mcam;
 	struct rvu_hwinfo *hw = rvu->hw;
 	int blkaddr, ucast_idx, index;
-	struct nix_rx_action action;
+	struct nix_rx_action action = { 0 };
 	u64 relaxed_mask;
 
 	if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc))
@@ -685,14 +684,14 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
 						      blkaddr, ucast_idx);
 
 	if (action.op != NIX_RX_ACTIONOP_RSS) {
-		*(u64 *)&action = 0x00;
+		*(u64 *)&action = 0;
 		action.op = NIX_RX_ACTIONOP_UCAST;
 	}
 
 	/* RX_ACTION set to MCAST for CGX PF's */
 	if (hw->cap.nix_rx_multicast && pfvf->use_mce_list &&
 	    is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
-		*(u64 *)&action = 0x00;
+		*(u64 *)&action = 0;
 		action.op = NIX_RX_ACTIONOP_MCAST;
 		pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
 		action.index = pfvf->promisc_mce_idx;
@@ -832,7 +831,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
 	struct rvu_hwinfo *hw = rvu->hw;
 	int blkaddr, ucast_idx, index;
 	u8 mac_addr[ETH_ALEN] = { 0 };
-	struct nix_rx_action action;
+	struct nix_rx_action action = { 0 };
 	struct rvu_pfvf *pfvf;
 	u16 vf_func;
 
@@ -861,14 +860,14 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
 						      blkaddr, ucast_idx);
 
 	if (action.op != NIX_RX_ACTIONOP_RSS) {
-		*(u64 *)&action = 0x00;
+		*(u64 *)&action = 0;
 		action.op = NIX_RX_ACTIONOP_UCAST;
 		action.pf_func = pcifunc;
 	}
 
 	/* RX_ACTION set to MCAST for CGX PF's */
 	if (hw->cap.nix_rx_multicast && pfvf->use_mce_list) {
-		*(u64 *)&action = 0x00;
+		*(u64 *)&action = 0;
 		action.op = NIX_RX_ACTIONOP_MCAST;
 		action.index = pfvf->mcast_mce_idx;
 	}
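The fix above replaces ad-hoc "*(u64 *)&action = 0x00;" zeroing, which some code paths missed, with a designated initializer, so the on-stack object starts from zero on every path before it is read back as a raw u64. A standalone C sketch; the bitfield layout is illustrative, not the real struct nix_rx_action:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rx_action { uint64_t op : 4, pf_func : 16, index : 20, rsvd : 24; };

int main(void)
{
	struct rx_action action = { 0 };	/* every path starts from zero */
	uint64_t raw;

	action.op = 1;				/* e.g. the UCAST op */
	memcpy(&raw, &action, sizeof(raw));	/* the *(u64 *)&action idiom, safely */
	printf("raw = 0x%016llx\n", (unsigned long long)raw);
	return 0;
}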


@@ -4,6 +4,7 @@ config SPARX5_SWITCH
 	depends on HAS_IOMEM
 	depends on OF
 	depends on ARCH_SPARX5 || COMPILE_TEST
+	depends on PTP_1588_CLOCK_OPTIONAL
 	select PHYLINK
 	select PHY_SPARX5_SERDES
 	select RESET_CONTROLLER


@@ -422,6 +422,8 @@ static int sparx5_fdma_tx_alloc(struct sparx5 *sparx5)
 			db_hw->dataptr = phys;
 			db_hw->status = 0;
 			db = devm_kzalloc(sparx5->dev, sizeof(*db), GFP_KERNEL);
+			if (!db)
+				return -ENOMEM;
 			db->cpu_addr = cpu_addr;
 			list_add_tail(&db->list, &tx->db_list);
 		}


@@ -212,19 +212,7 @@ bool sparx5_mact_find(struct sparx5 *sparx5,
 	mutex_unlock(&sparx5->lock);
 
-	return ret == 0;
-}
-
-static int sparx5_mact_lookup(struct sparx5 *sparx5,
-			      const unsigned char mac[ETH_ALEN],
-			      u16 vid)
-{
-	u32 pcfg2;
-
-	if (sparx5_mact_find(sparx5, mac, vid, &pcfg2))
-		return 1;
-
-	return 0;
+	return ret;
 }
 
 int sparx5_mact_forget(struct sparx5 *sparx5,
@@ -305,9 +293,10 @@ int sparx5_add_mact_entry(struct sparx5 *sparx5,
 {
 	struct sparx5_mact_entry *mact_entry;
 	int ret;
+	u32 cfg2;
 
-	ret = sparx5_mact_lookup(sparx5, addr, vid);
-	if (ret)
+	ret = sparx5_mact_find(sparx5, addr, vid, &cfg2);
+	if (!ret)
 		return 0;
 
 	/* In case the entry already exists, don't add it again to SW,


@@ -65,13 +65,10 @@ enum sparx5_vlan_port_type {
 #define PGID_IPV6_MC_CTRL (PGID_BASE + 5)
 #define PGID_BCAST (PGID_BASE + 6)
 #define PGID_CPU (PGID_BASE + 7)
-#define PGID_MCAST_START (PGID_BASE + 8)
 
 #define PGID_TABLE_SIZE 3290
+#define PGID_MCAST_START 65
 
-#define PGID_GLAG_START 833
-#define PGID_GLAG_END 1088
-
 #define IFH_LEN 9 /* 36 bytes */
 #define NULL_VID 0
 #define SPX5_MACT_PULL_DELAY (2 * HZ)
@@ -328,6 +325,7 @@ void sparx5_mact_init(struct sparx5 *sparx5);
 /* sparx5_vlan.c */
 void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable);
+void sparx5_pgid_read_mask(struct sparx5 *spx5, int pgid, u32 portmask[3]);
 void sparx5_update_fwd(struct sparx5 *sparx5);
 void sparx5_vlan_init(struct sparx5 *sparx5);
 void sparx5_vlan_port_setup(struct sparx5 *sparx5, int portno);
@@ -374,7 +372,6 @@ enum sparx5_pgid_type {
 	SPX5_PGID_FREE,
 	SPX5_PGID_RESERVED,
 	SPX5_PGID_MULTICAST,
-	SPX5_PGID_GLAG
 };
 
 void sparx5_pgid_init(struct sparx5 *spx5);


@@ -15,28 +15,14 @@ void sparx5_pgid_init(struct sparx5 *spx5)
 		spx5->pgid_map[i] = SPX5_PGID_RESERVED;
 }
 
-int sparx5_pgid_alloc_glag(struct sparx5 *spx5, u16 *idx)
-{
-	int i;
-
-	for (i = PGID_GLAG_START; i <= PGID_GLAG_END; i++)
-		if (spx5->pgid_map[i] == SPX5_PGID_FREE) {
-			spx5->pgid_map[i] = SPX5_PGID_GLAG;
-			*idx = i;
-			return 0;
-		}
-
-	return -EBUSY;
-}
-
 int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx)
 {
 	int i;
 
+	/* The multicast area starts at index 65, but the first 7
+	 * are reserved for flood masks and CPU. Start alloc after that.
+	 */
 	for (i = PGID_MCAST_START; i < PGID_TABLE_SIZE; i++) {
-		if (i == PGID_GLAG_START)
-			i = PGID_GLAG_END + 1;
 		if (spx5->pgid_map[i] == SPX5_PGID_FREE) {
 			spx5->pgid_map[i] = SPX5_PGID_MULTICAST;
 			*idx = i;

@@ -406,11 +406,11 @@ static int sparx5_handle_port_mdb_add(struct net_device *dev,
 
 	res = sparx5_mact_find(spx5, v->addr, vid, &mact_entry);
-	if (res) {
+	if (res == 0) {
 		pgid_idx = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(mact_entry);
 
-		/* MC_IDX has an offset of 65 in the PGID table. */
-		pgid_idx += PGID_MCAST_START;
+		/* MC_IDX starts after the port masks in the PGID table */
+		pgid_idx += SPX5_PORTS;
 		sparx5_pgid_update_mask(port, pgid_idx, true);
 	} else {
 		err = sparx5_pgid_alloc_mcast(spx5, &pgid_idx);
@@ -468,17 +468,15 @@ static int sparx5_handle_port_mdb_del(struct net_device *dev,
 
 	res = sparx5_mact_find(spx5, v->addr, vid, &mact_entry);
-	if (res) {
+	if (res == 0) {
 		pgid_idx = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(mact_entry);
 
-		/* MC_IDX has an offset of 65 in the PGID table. */
-		pgid_idx += PGID_MCAST_START;
+		/* MC_IDX starts after the port masks in the PGID table */
+		pgid_idx += SPX5_PORTS;
 		sparx5_pgid_update_mask(port, pgid_idx, false);
 
-		pgid_entry[0] = spx5_rd(spx5, ANA_AC_PGID_CFG(pgid_idx));
-		pgid_entry[1] = spx5_rd(spx5, ANA_AC_PGID_CFG1(pgid_idx));
-		pgid_entry[2] = spx5_rd(spx5, ANA_AC_PGID_CFG2(pgid_idx));
-		if (pgid_entry[0] == 0 && pgid_entry[1] == 0 && pgid_entry[2] == 0) {
+		sparx5_pgid_read_mask(spx5, pgid_idx, pgid_entry);
+		if (bitmap_empty((unsigned long *)pgid_entry, SPX5_PORTS)) {
 			/* No ports are in MC group. Remove entry */
 			err = sparx5_mdb_del_entry(dev, spx5, v->addr, vid, pgid_idx);
 			if (err)

@@ -138,6 +138,13 @@ void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable)
 	}
 }
 
+void sparx5_pgid_read_mask(struct sparx5 *spx5, int pgid, u32 portmask[3])
+{
+	portmask[0] = spx5_rd(spx5, ANA_AC_PGID_CFG(pgid));
+	portmask[1] = spx5_rd(spx5, ANA_AC_PGID_CFG1(pgid));
+	portmask[2] = spx5_rd(spx5, ANA_AC_PGID_CFG2(pgid));
+}
+
 void sparx5_update_fwd(struct sparx5 *sparx5)
 {
 	DECLARE_BITMAP(workmask, SPX5_PORTS);


@@ -51,7 +51,7 @@ static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
 	if (dcb && dcb->ops->get_hw_capability)
 		return dcb->ops->get_hw_capability(dcb);
 
-	return 0;
+	return -EOPNOTSUPP;
 }
 
 static inline void qlcnic_dcb_free(struct qlcnic_dcb *dcb)
@@ -65,7 +65,7 @@ static inline int qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
 	if (dcb && dcb->ops->attach)
 		return dcb->ops->attach(dcb);
 
-	return 0;
+	return -EOPNOTSUPP;
 }
 
 static inline int
@@ -74,7 +74,7 @@ qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf)
 	if (dcb && dcb->ops->query_hw_capability)
 		return dcb->ops->query_hw_capability(dcb, buf);
 
-	return 0;
+	return -EOPNOTSUPP;
 }
 
 static inline void qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
@@ -89,7 +89,7 @@ qlcnic_dcb_query_cee_param(struct qlcnic_dcb *dcb, char *buf, u8 type)
 	if (dcb && dcb->ops->query_cee_param)
 		return dcb->ops->query_cee_param(dcb, buf, type);
 
-	return 0;
+	return -EOPNOTSUPP;
 }
 
 static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
@@ -97,7 +97,7 @@ static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
 	if (dcb && dcb->ops->get_cee_cfg)
 		return dcb->ops->get_cee_cfg(dcb);
 
-	return 0;
+	return -EOPNOTSUPP;
 }
 
 static inline void qlcnic_dcb_aen_handler(struct qlcnic_dcb *dcb, void *msg)


@@ -487,6 +487,13 @@ static int ethqos_clks_config(void *priv, bool enabled)
 			dev_err(&ethqos->pdev->dev, "rgmii_clk enable failed\n");
 			return ret;
 		}
+
+		/* Enable functional clock to prevent DMA reset to timeout due
+		 * to lacking PHY clock after the hardware block has been power
+		 * cycled. The actual configuration will be adjusted once
+		 * ethqos_fix_mac_speed() is invoked.
+		 */
+		ethqos_set_func_clk_en(ethqos);
 	} else {
 		clk_disable_unprepare(ethqos->rgmii_clk);
 	}


@@ -11,6 +11,7 @@
  */
 
 #include "bcm-phy-lib.h"
+#include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/phy.h>
 #include <linux/brcmphy.h>
@@ -602,6 +603,26 @@ static int brcm_fet_config_init(struct phy_device *phydev)
 	if (err < 0)
 		return err;
 
+	/* The datasheet indicates the PHY needs up to 1us to complete a reset,
+	 * build some slack here.
+	 */
+	usleep_range(1000, 2000);
+
+	/* The PHY requires 65 MDC clock cycles to complete a write operation
+	 * and turnaround the line properly.
+	 *
+	 * We ignore -EIO here as the MDIO controller (e.g.: mdio-bcm-unimac)
+	 * may flag the lack of turn-around as a read failure. This is
+	 * particularly true with this combination since the MDIO controller
+	 * only used 64 MDC cycles. This is not a critical failure in this
+	 * specific case and it has no functional impact otherwise, so we let
+	 * that one go through. If there is a genuine bus error, the next read
+	 * of MII_BRCM_FET_INTREG will error out.
+	 */
+	err = phy_read(phydev, MII_BMCR);
+	if (err < 0 && err != -EIO)
+		return err;
+
 	reg = phy_read(phydev, MII_BRCM_FET_INTREG);
 	if (reg < 0)
 		return reg;


@@ -1872,6 +1872,45 @@ static const struct driver_info mct_info = {
 	.tx_fixup = ax88179_tx_fixup,
 };
 
+static const struct driver_info at_umc2000_info = {
+	.description = "AT-UMC2000 USB 3.0/USB 3.1 Gen 1 to Gigabit Ethernet Adapter",
+	.bind = ax88179_bind,
+	.unbind = ax88179_unbind,
+	.status = ax88179_status,
+	.link_reset = ax88179_link_reset,
+	.reset = ax88179_reset,
+	.stop = ax88179_stop,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.rx_fixup = ax88179_rx_fixup,
+	.tx_fixup = ax88179_tx_fixup,
+};
+
+static const struct driver_info at_umc200_info = {
+	.description = "AT-UMC200 USB 3.0/USB 3.1 Gen 1 to Fast Ethernet Adapter",
+	.bind = ax88179_bind,
+	.unbind = ax88179_unbind,
+	.status = ax88179_status,
+	.link_reset = ax88179_link_reset,
+	.reset = ax88179_reset,
+	.stop = ax88179_stop,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.rx_fixup = ax88179_rx_fixup,
+	.tx_fixup = ax88179_tx_fixup,
+};
+
+static const struct driver_info at_umc2000sp_info = {
+	.description = "AT-UMC2000/SP USB 3.0/USB 3.1 Gen 1 to Gigabit Ethernet Adapter",
+	.bind = ax88179_bind,
+	.unbind = ax88179_unbind,
+	.status = ax88179_status,
+	.link_reset = ax88179_link_reset,
+	.reset = ax88179_reset,
+	.stop = ax88179_stop,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.rx_fixup = ax88179_rx_fixup,
+	.tx_fixup = ax88179_tx_fixup,
+};
+
 static const struct usb_device_id products[] = {
 {
 	/* ASIX AX88179 10/100/1000 */
@@ -1913,6 +1952,18 @@ static const struct usb_device_id products[] = {
 	/* Magic Control Technology U3-A9003 USB 3.0 Gigabit Ethernet Adapter */
 	USB_DEVICE(0x0711, 0x0179),
 	.driver_info = (unsigned long)&mct_info,
+}, {
+	/* Allied Telesis AT-UMC2000 USB 3.0/USB 3.1 Gen 1 to Gigabit Ethernet Adapter */
+	USB_DEVICE(0x07c9, 0x000e),
+	.driver_info = (unsigned long)&at_umc2000_info,
+}, {
+	/* Allied Telesis AT-UMC200 USB 3.0/USB 3.1 Gen 1 to Fast Ethernet Adapter */
+	USB_DEVICE(0x07c9, 0x000f),
+	.driver_info = (unsigned long)&at_umc200_info,
+}, {
+	/* Allied Telesis AT-UMC2000/SP USB 3.0/USB 3.1 Gen 1 to Gigabit Ethernet Adapter */
+	USB_DEVICE(0x07c9, 0x0010),
+	.driver_info = (unsigned long)&at_umc2000sp_info,
 },
 { },
 };


@@ -4601,16 +4601,6 @@ bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
 						 struct list_head **iter);
 
-#ifdef CONFIG_LOCKDEP
-static LIST_HEAD(net_unlink_list);
-
-static inline void net_unlink_todo(struct net_device *dev)
-{
-	if (list_empty(&dev->unlink_list))
-		list_add_tail(&dev->unlink_list, &net_unlink_list);
-}
-#endif
-
 /* iterate through upper list, must be called under RCU read lock */
 #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
 	for (iter = &(dev)->adj_list.upper, \

@@ -99,7 +99,7 @@ static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc,
 		return skb;
 
 	nf_hook_state_init(&state, NF_NETDEV_EGRESS,
-			   NFPROTO_NETDEV, dev, NULL, NULL,
+			   NFPROTO_NETDEV, NULL, dev, NULL,
 			   dev_net(dev), NULL);
 
 	/* nf assumes rcu_read_lock, not just read_lock_bh */

@@ -7193,6 +7193,16 @@ static int __netdev_update_upper_level(struct net_device *dev,
 	return 0;
 }
 
+#ifdef CONFIG_LOCKDEP
+static LIST_HEAD(net_unlink_list);
+
+static void net_unlink_todo(struct net_device *dev)
+{
+	if (list_empty(&dev->unlink_list))
+		list_add_tail(&dev->unlink_list, &net_unlink_list);
+}
+#endif
+
 static int __netdev_update_lower_level(struct net_device *dev,
 				       struct netdev_nested_priv *priv)
 {


@@ -275,6 +275,7 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr)
 {
 	struct sock *sk = sock->sk;
 	struct llc_sock *llc = llc_sk(sk);
+	struct net_device *dev = NULL;
 	struct llc_sap *sap;
 	int rc = -EINVAL;
 
@@ -286,16 +287,15 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr)
 		goto out;
 	rc = -ENODEV;
 	if (sk->sk_bound_dev_if) {
-		llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
-		if (llc->dev && addr->sllc_arphrd != llc->dev->type) {
-			dev_put(llc->dev);
-			llc->dev = NULL;
+		dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
+		if (dev && addr->sllc_arphrd != dev->type) {
+			dev_put(dev);
+			dev = NULL;
 		}
 	} else
-		llc->dev = dev_getfirstbyhwtype(&init_net, addr->sllc_arphrd);
-	if (!llc->dev)
+		dev = dev_getfirstbyhwtype(&init_net, addr->sllc_arphrd);
+	if (!dev)
 		goto out;
-	netdev_tracker_alloc(llc->dev, &llc->dev_tracker, GFP_KERNEL);
 	rc = -EUSERS;
 	llc->laddr.lsap = llc_ui_autoport();
 	if (!llc->laddr.lsap)
@@ -304,6 +304,12 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr)
 	sap = llc_sap_open(llc->laddr.lsap, NULL);
 	if (!sap)
 		goto out;
+
+	/* Note: We do not expect errors from this point. */
+	llc->dev = dev;
+	netdev_tracker_alloc(llc->dev, &llc->dev_tracker, GFP_KERNEL);
+	dev = NULL;
+
 	memcpy(llc->laddr.mac, llc->dev->dev_addr, IFHWADDRLEN);
 	memcpy(&llc->addr, addr, sizeof(llc->addr));
 	/* assign new connection to its SAP */
@@ -311,10 +317,7 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr)
 	sock_reset_flag(sk, SOCK_ZAPPED);
 	rc = 0;
 out:
-	if (rc) {
-		dev_put_track(llc->dev, &llc->dev_tracker);
-		llc->dev = NULL;
-	}
+	dev_put(dev);
 	return rc;
 }
 
@@ -337,6 +340,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
 	struct sockaddr_llc *addr = (struct sockaddr_llc *)uaddr;
 	struct sock *sk = sock->sk;
 	struct llc_sock *llc = llc_sk(sk);
+	struct net_device *dev = NULL;
 	struct llc_sap *sap;
 	int rc = -EINVAL;
 
@@ -352,25 +356,27 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
 	rc = -ENODEV;
 	rcu_read_lock();
 	if (sk->sk_bound_dev_if) {
-		llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if);
-		if (llc->dev) {
+		dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if);
+		if (dev) {
 			if (is_zero_ether_addr(addr->sllc_mac))
-				memcpy(addr->sllc_mac, llc->dev->dev_addr,
+				memcpy(addr->sllc_mac, dev->dev_addr,
 				       IFHWADDRLEN);
-			if (addr->sllc_arphrd != llc->dev->type ||
+			if (addr->sllc_arphrd != dev->type ||
 			    !ether_addr_equal(addr->sllc_mac,
-					      llc->dev->dev_addr)) {
+					      dev->dev_addr)) {
 				rc = -EINVAL;
-				llc->dev = NULL;
+				dev = NULL;
 			}
 		}
-	} else
-		llc->dev = dev_getbyhwaddr_rcu(&init_net, addr->sllc_arphrd,
+	} else {
+		dev = dev_getbyhwaddr_rcu(&init_net, addr->sllc_arphrd,
 					   addr->sllc_mac);
-	dev_hold_track(llc->dev, &llc->dev_tracker, GFP_ATOMIC);
+	}
+	dev_hold(dev);
 	rcu_read_unlock();
-	if (!llc->dev)
+	if (!dev)
 		goto out;
+
 	if (!addr->sllc_sap) {
 		rc = -EUSERS;
 		addr->sllc_sap = llc_ui_autoport();
@@ -402,6 +408,12 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
 			goto out_put;
 		}
 	}
+
+	/* Note: We do not expect errors from this point. */
+	llc->dev = dev;
+	netdev_tracker_alloc(llc->dev, &llc->dev_tracker, GFP_KERNEL);
+	dev = NULL;
+
 	llc->laddr.lsap = addr->sllc_sap;
 	memcpy(llc->laddr.mac, addr->sllc_mac, IFHWADDRLEN);
 	memcpy(&llc->addr, addr, sizeof(llc->addr));
@@ -412,10 +424,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
 out_put:
 	llc_sap_put(sap);
 out:
-	if (rc) {
-		dev_put_track(llc->dev, &llc->dev_tracker);
-		llc->dev = NULL;
-	}
+	dev_put(dev);
 	release_sock(sk);
 	return rc;
 }
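The llc change above is the publish-on-success pattern: resolve the device into a local pointer, let every failure path drop only that local reference, and store it into the long-lived socket exactly once, after the last thing that can fail. A standalone C sketch with a toy refcount (all names hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct device { int refs; };
struct sock_ctx { struct device *dev; };

static struct device *dev_get(void)
{
	struct device *d = calloc(1, sizeof(*d));

	if (d)
		d->refs = 1;
	return d;
}

static void dev_put(struct device *d)
{
	if (d && --d->refs == 0)
		free(d);
}

static int bind_ctx(struct sock_ctx *ctx, int fail_late)
{
	struct device *dev = dev_get();	/* stays local until we commit */
	int rc = -1;

	if (!dev)
		goto out;
	if (fail_late)			/* e.g. the SAP lookup failing */
		goto out;

	/* Note: we do not expect errors from this point. */
	ctx->dev = dev;
	dev = NULL;			/* ownership transferred */
	rc = 0;
out:
	dev_put(dev);			/* no-op once ownership moved */
	return rc;
}

int main(void)
{
	struct sock_ctx ctx = { 0 };

	printf("fail: %d (ctx.dev=%p)\n", bind_ctx(&ctx, 1), (void *)ctx.dev);
	printf("ok:   %d (ctx.dev=%p)\n", bind_ctx(&ctx, 0), (void *)ctx.dev);
	dev_put(ctx.dev);
	return 0;
}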


@@ -58,7 +58,7 @@ static struct nf_hook_entries *allocate_hook_entries_size(u16 num)
 	if (num == 0)
 		return NULL;
 
-	e = kvzalloc(alloc, GFP_KERNEL);
+	e = kvzalloc(alloc, GFP_KERNEL_ACCOUNT);
 	if (e)
 		e->num_hook_entries = num;
 	return e;


@@ -341,8 +341,8 @@ static void tcp_options(const struct sk_buff *skb,
 	if (!ptr)
 		return;
 
-	state->td_scale =
-	state->flags = 0;
+	state->td_scale = 0;
+	state->flags &= IP_CT_TCP_FLAG_BE_LIBERAL;
 
 	while (length > 0) {
 		int opcode=*ptr++;
@@ -862,6 +862,16 @@ static bool tcp_can_early_drop(const struct nf_conn *ct)
 	return false;
 }
 
+static void nf_ct_tcp_state_reset(struct ip_ct_tcp_state *state)
+{
+	state->td_end = 0;
+	state->td_maxend = 0;
+	state->td_maxwin = 0;
+	state->td_maxack = 0;
+	state->td_scale = 0;
+	state->flags &= IP_CT_TCP_FLAG_BE_LIBERAL;
+}
+
 /* Returns verdict for packet, or -1 for invalid. */
 int nf_conntrack_tcp_packet(struct nf_conn *ct,
 			    struct sk_buff *skb,
@@ -968,8 +978,7 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
 			ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
 			ct->proto.tcp.seen[ct->proto.tcp.last_dir].flags =
 				ct->proto.tcp.last_flags;
-			memset(&ct->proto.tcp.seen[dir], 0,
-			       sizeof(struct ip_ct_tcp_state));
+			nf_ct_tcp_state_reset(&ct->proto.tcp.seen[dir]);
 			break;
 		}
 		ct->proto.tcp.last_index = index;
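The new helper resets each tracking counter individually instead of memset()ing the whole per-direction state, because a blanket wipe also cleared IP_CT_TCP_FLAG_BE_LIBERAL, which the be_liberal mode is meant to keep set for the connection's lifetime. A standalone C sketch of "reset everything but the sticky flag" (simplified struct; the 0x08 value matches the kernel flag, the rest is illustrative):

#include <stdio.h>
#include <string.h>

#define FLAG_BE_LIBERAL 0x08
#define FLAG_SACK_PERM  0x01

struct tcp_state {
	unsigned int td_end, td_maxend, td_maxwin;
	unsigned char flags;
};

static void state_reset(struct tcp_state *s)
{
	s->td_end = s->td_maxend = s->td_maxwin = 0;
	s->flags &= FLAG_BE_LIBERAL;	/* keep only the sticky flag */
}

int main(void)
{
	struct tcp_state s = { 100, 200, 300, FLAG_BE_LIBERAL | FLAG_SACK_PERM };

	state_reset(&s);
	printf("after reset:  flags=0x%x (liberal kept)\n", s.flags);

	memset(&s, 0, sizeof(s));	/* the old behaviour: flag lost */
	printf("after memset: flags=0x%x\n", s.flags);
	return 0;
}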


@@ -1192,16 +1192,16 @@ static int nf_tables_newtable(struct sk_buff *skb, const struct nfnl_info *info,
 	}
 
 	err = -ENOMEM;
-	table = kzalloc(sizeof(*table), GFP_KERNEL);
+	table = kzalloc(sizeof(*table), GFP_KERNEL_ACCOUNT);
 	if (table == NULL)
 		goto err_kzalloc;
 
-	table->name = nla_strdup(attr, GFP_KERNEL);
+	table->name = nla_strdup(attr, GFP_KERNEL_ACCOUNT);
 	if (table->name == NULL)
 		goto err_strdup;
 
 	if (nla[NFTA_TABLE_USERDATA]) {
-		table->udata = nla_memdup(nla[NFTA_TABLE_USERDATA], GFP_KERNEL);
+		table->udata = nla_memdup(nla[NFTA_TABLE_USERDATA], GFP_KERNEL_ACCOUNT);
 		if (table->udata == NULL)
 			goto err_table_udata;
@@ -1882,7 +1882,7 @@ static struct nft_hook *nft_netdev_hook_alloc(struct net *net,
 	struct nft_hook *hook;
 	int err;
 
-	hook = kmalloc(sizeof(struct nft_hook), GFP_KERNEL);
+	hook = kmalloc(sizeof(struct nft_hook), GFP_KERNEL_ACCOUNT);
 	if (!hook) {
 		err = -ENOMEM;
 		goto err_hook_alloc;
@@ -2105,7 +2105,7 @@ static struct nft_rule_blob *nf_tables_chain_alloc_rules(unsigned int size)
 	if (size > INT_MAX)
 		return NULL;
 
-	blob = kvmalloc(size, GFP_KERNEL);
+	blob = kvmalloc(size, GFP_KERNEL_ACCOUNT);
 	if (!blob)
 		return NULL;
 
@@ -2205,7 +2205,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
 		if (err < 0)
 			return err;
 
-		basechain = kzalloc(sizeof(*basechain), GFP_KERNEL);
+		basechain = kzalloc(sizeof(*basechain), GFP_KERNEL_ACCOUNT);
 		if (basechain == NULL) {
 			nft_chain_release_hook(&hook);
 			return -ENOMEM;
@@ -2235,7 +2235,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
 		if (flags & NFT_CHAIN_HW_OFFLOAD)
 			return -EOPNOTSUPP;
 
-		chain = kzalloc(sizeof(*chain), GFP_KERNEL);
+		chain = kzalloc(sizeof(*chain), GFP_KERNEL_ACCOUNT);
 		if (chain == NULL)
 			return -ENOMEM;
 
@@ -2248,7 +2248,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
 	chain->table = table;
 
 	if (nla[NFTA_CHAIN_NAME]) {
-		chain->name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL);
+		chain->name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL_ACCOUNT);
 	} else {
 		if (!(flags & NFT_CHAIN_BINDING)) {
 			err = -EINVAL;
@@ -2256,7 +2256,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
 		}
 
 		snprintf(name, sizeof(name), "__chain%llu", ++chain_id);
-		chain->name = kstrdup(name, GFP_KERNEL);
+		chain->name = kstrdup(name, GFP_KERNEL_ACCOUNT);
 	}
 
 	if (!chain->name) {
@@ -2265,7 +2265,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
 	}
 
 	if (nla[NFTA_CHAIN_USERDATA]) {
-		chain->udata = nla_memdup(nla[NFTA_CHAIN_USERDATA], GFP_KERNEL);
+		chain->udata = nla_memdup(nla[NFTA_CHAIN_USERDATA], GFP_KERNEL_ACCOUNT);
 		if (chain->udata == NULL) {
 			err = -ENOMEM;
 			goto err_destroy_chain;
@@ -2428,7 +2428,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
 		char *name;
 
 		err = -ENOMEM;
-		name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL);
+		name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL_ACCOUNT);
 		if (!name)
 			goto err;
 
@@ -2876,7 +2876,7 @@ static struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
 		goto err1;
 
 	err = -ENOMEM;
-	expr = kzalloc(expr_info.ops->size, GFP_KERNEL);
+	expr = kzalloc(expr_info.ops->size, GFP_KERNEL_ACCOUNT);
 	if (expr == NULL)
 		goto err2;
 
@@ -3484,7 +3484,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
 	}
 
 	err = -ENOMEM;
-	rule = kzalloc(sizeof(*rule) + size + usize, GFP_KERNEL);
+	rule = kzalloc(sizeof(*rule) + size + usize, GFP_KERNEL_ACCOUNT);
 	if (rule == NULL)
 		goto err_release_expr;
 
@@ -3897,7 +3897,7 @@ cont:
 		free_page((unsigned long)inuse);
 	}
 
-	set->name = kasprintf(GFP_KERNEL, name, min + n);
+	set->name = kasprintf(GFP_KERNEL_ACCOUNT, name, min + n);
 	if (!set->name)
 		return -ENOMEM;
 
@@ -4461,11 +4461,11 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
 	alloc_size = sizeof(*set) + size + udlen;
 	if (alloc_size < size || alloc_size > INT_MAX)
 		return -ENOMEM;
-	set = kvzalloc(alloc_size, GFP_KERNEL);
+	set = kvzalloc(alloc_size, GFP_KERNEL_ACCOUNT);
 	if (!set)
 		return -ENOMEM;
 
-	name = nla_strdup(nla[NFTA_SET_NAME], GFP_KERNEL);
+	name = nla_strdup(nla[NFTA_SET_NAME], GFP_KERNEL_ACCOUNT);
 	if (!name) {
 		err = -ENOMEM;
 		goto err_set_name;
@@ -6000,7 +6000,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
 	err = -ENOMEM;
 	elem.priv = nft_set_elem_init(set, &tmpl, elem.key.val.data,
 				      elem.key_end.val.data, elem.data.val.data,
-				      timeout, expiration, GFP_KERNEL);
+				      timeout, expiration, GFP_KERNEL_ACCOUNT);
 	if (elem.priv == NULL)
 		goto err_parse_data;
 
@@ -6244,7 +6244,7 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
 	err = -ENOMEM;
 	elem.priv = nft_set_elem_init(set, &tmpl, elem.key.val.data,
 				      elem.key_end.val.data, NULL, 0, 0,
-				      GFP_KERNEL);
+				      GFP_KERNEL_ACCOUNT);
 	if (elem.priv == NULL)
 		goto fail_elem;
 
@@ -6556,7 +6556,7 @@ static struct nft_object *nft_obj_init(const struct nft_ctx *ctx,
 	}
 
 	err = -ENOMEM;
-	obj = kzalloc(sizeof(*obj) + ops->size, GFP_KERNEL);
+	obj = kzalloc(sizeof(*obj) + ops->size, GFP_KERNEL_ACCOUNT);
 	if (!obj)
 		goto err2;
 
@@ -6722,7 +6722,7 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info,
 	obj->key.table = table;
 	obj->handle = nf_tables_alloc_handle(table);
 
-	obj->key.name = nla_strdup(nla[NFTA_OBJ_NAME], GFP_KERNEL);
+	obj->key.name = nla_strdup(nla[NFTA_OBJ_NAME], GFP_KERNEL_ACCOUNT);
 	if (!obj->key.name) {
 		err = -ENOMEM;
 		goto err_strdup;
@@ -7483,7 +7483,7 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
 
 	nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
 
-	flowtable = kzalloc(sizeof(*flowtable), GFP_KERNEL);
+	flowtable = kzalloc(sizeof(*flowtable), GFP_KERNEL_ACCOUNT);
 	if (!flowtable)
 		return -ENOMEM;
 
@@ -7491,7 +7491,7 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
 	flowtable->handle = nf_tables_alloc_handle(table);
 	INIT_LIST_HEAD(&flowtable->hook_list);
 
-	flowtable->name = nla_strdup(nla[NFTA_FLOWTABLE_NAME], GFP_KERNEL);
+	flowtable->name = nla_strdup(nla[NFTA_FLOWTABLE_NAME], GFP_KERNEL_ACCOUNT);
 	if (!flowtable->name) {
 		err = -ENOMEM;
 		goto err1;

@@ -666,22 +666,25 @@ static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
 	if (!ct)
 		return false;
 	if (!net_eq(net, read_pnet(&ct->ct_net)))
-		return false;
+		goto drop_ct;
 	if (nf_ct_zone(ct)->id != zone_id)
-		return false;
+		goto drop_ct;
 
 	/* Force conntrack entry direction. */
 	if (force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
 		if (nf_ct_is_confirmed(ct))
 			nf_ct_kill(ct);
 
-		nf_ct_put(ct);
-		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
-
-		return false;
+		goto drop_ct;
 	}
 
 	return true;
+
+drop_ct:
+	nf_ct_put(ct);
+	nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
+
+	return false;
 }
 
 /* Trim the skb to the length specified by the IP/IPv6 header,
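tcf_ct_skb_nfct_cached() takes a conntrack reference before these checks, and the two early "return false" paths above leaked it. Funnelling every failure exit through one label makes the put hard to miss, the same shape as the goto-unwind idiom. A tiny standalone C sketch with a toy refcount (names hypothetical):

#include <stdbool.h>
#include <stdio.h>

struct conn { int refs; int zone; };

static void conn_put(struct conn *c)
{
	c->refs--;
}

static bool conn_cached(struct conn *c, int zone)
{
	if (c->zone != zone)
		goto drop;	/* was: return false, which leaked c->refs */
	return true;
drop:
	conn_put(c);
	return false;
}

int main(void)
{
	struct conn c = { .refs = 1, .zone = 1 };

	conn_cached(&c, 2);	/* zone mismatch */
	printf("refs after miss: %d\n", c.refs);	/* 0: reference dropped */
	return 0;
}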


@@ -57,6 +57,9 @@ static void smc_close_stream_wait(struct smc_sock *smc, long timeout)
 	if (!smc_tx_prepared_sends(&smc->conn))
 		return;
 
+	/* Send out corked data remaining in sndbuf */
+	smc_tx_pending(&smc->conn);
+
 	smc->wait_close_tx_prepared = 1;
 	add_wait_queue(sk_sleep(sk), &wait);
 	while (!signal_pending(current) && timeout) {

@@ -61,5 +61,10 @@ err_alloc:
 void __net_exit smc_sysctl_net_exit(struct net *net)
 {
+	struct ctl_table *table;
+
+	table = net->smc.smc_hdr->ctl_table_arg;
 	unregister_net_sysctl_table(net->smc.smc_hdr);
+	if (!net_eq(net, &init_net))
+		kfree(table);
 }
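The leak fix has a subtle ordering constraint: the per-netns ctl_table is only reachable through the header that unregister_net_sysctl_table() tears down, so the pointer must be saved first and freed after. A standalone C sketch of the save-then-teardown order (reg()/unreg() are stand-ins for the sysctl API):

#include <stdio.h>
#include <stdlib.h>

struct ctl_header { int *table_arg; };

static struct ctl_header *reg(int *table)
{
	struct ctl_header *h = malloc(sizeof(*h));

	if (h)
		h->table_arg = table;
	return h;
}

static void unreg(struct ctl_header *h)
{
	free(h);	/* after this, h->table_arg is unreachable */
}

int main(void)
{
	int *table = malloc(4 * sizeof(*table));	/* per-netns copy */
	struct ctl_header *h = reg(table);
	int *saved;

	saved = h->table_arg;	/* read it out while the header is alive */
	unreg(h);
	free(saved);		/* the allocation the old code leaked */

	printf("no leak\n");
	return 0;
}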


@@ -622,6 +622,13 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
 	INIT_WORK(&vsock->event_work, virtio_transport_event_work);
 	INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);
 
+	if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_SEQPACKET))
+		vsock->seqpacket_allow = true;
+
+	vdev->priv = vsock;
+
+	virtio_device_ready(vdev);
+
 	mutex_lock(&vsock->tx_lock);
 	vsock->tx_run = true;
 	mutex_unlock(&vsock->tx_lock);
@@ -636,10 +643,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
 	vsock->event_run = true;
 	mutex_unlock(&vsock->event_lock);
 
-	if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_SEQPACKET))
-		vsock->seqpacket_allow = true;
-
-	vdev->priv = vsock;
-
 	rcu_assign_pointer(the_virtio_vsock, vsock);
 	mutex_unlock(&the_virtio_vsock_mutex);

@@ -1765,10 +1765,15 @@ void x25_kill_by_neigh(struct x25_neigh *nb)
 
 	write_lock_bh(&x25_list_lock);
 
-	sk_for_each(s, &x25_list)
-		if (x25_sk(s)->neighbour == nb)
+	sk_for_each(s, &x25_list) {
+		if (x25_sk(s)->neighbour == nb) {
+			write_unlock_bh(&x25_list_lock);
+			lock_sock(s);
 			x25_disconnect(s, ENETUNREACH, 0, 0);
+			release_sock(s);
+			write_lock_bh(&x25_list_lock);
+		}
+	}
 
 	write_unlock_bh(&x25_list_lock);
/* Remove any related forwards */
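x25_disconnect() ends up taking the socket lock, which may sleep, and sleeping under write_lock_bh() is invalid; the fix drops the list lock around the sleeping call and re-takes it before continuing the walk. A standalone pthreads sketch of the drop-and-reacquire shape (the kernel's spinlock/socket-lock pair is approximated by two mutexes; names hypothetical; compile with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;

static void disconnect_locked(int id)
{
	pthread_mutex_lock(&sock_lock);		/* the lock that may block */
	printf("disconnect socket %d\n", id);
	pthread_mutex_unlock(&sock_lock);
}

int main(void)
{
	pthread_mutex_lock(&list_lock);
	for (int id = 0; id < 3; id++) {
		/* drop the spinlock-analogue before a call that may block */
		pthread_mutex_unlock(&list_lock);
		disconnect_locked(id);
		/* re-take it before continuing the walk */
		pthread_mutex_lock(&list_lock);
	}
	pthread_mutex_unlock(&list_lock);
	return 0;
}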


@@ -120,11 +120,11 @@ echo "[ OK ]"
 # Move the underlay to a non-default VRF
 ip -netns hv-1 link set veth0 vrf vrf-underlay
-ip -netns hv-1 link set veth0 down
-ip -netns hv-1 link set veth0 up
+ip -netns hv-1 link set vxlan0 down
+ip -netns hv-1 link set vxlan0 up
 ip -netns hv-2 link set veth0 vrf vrf-underlay
-ip -netns hv-2 link set veth0 down
-ip -netns hv-2 link set veth0 up
+ip -netns hv-2 link set vxlan0 down
+ip -netns hv-2 link set vxlan0 up
 
 echo -n "Check VM connectivity through VXLAN (underlay in a VRF) "
 ip netns exec vm-1 ping -c 1 -W 1 10.0.0.2 &> /dev/null || (echo "[FAIL]"; false)


@@ -683,6 +683,9 @@ TEST_F(tls, splice_cmsg_to_pipe)
 	char buf[10];
 	int p[2];
 
+	if (self->notls)
+		SKIP(return, "no TLS support");
+
 	ASSERT_GE(pipe(p), 0);
 	EXPECT_EQ(tls_send_cmsg(self->fd, 100, test_str, send_len, 0), 10);
 	EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, send_len, 0), -1);
@@ -703,6 +706,9 @@ TEST_F(tls, splice_dec_cmsg_to_pipe)
 	char buf[10];
 	int p[2];
 
+	if (self->notls)
+		SKIP(return, "no TLS support");
+
 	ASSERT_GE(pipe(p), 0);
 	EXPECT_EQ(tls_send_cmsg(self->fd, 100, test_str, send_len, 0), 10);
 	EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1);