mirror of https://github.com/torvalds/linux.git
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix wrong TCP checksums on MTU probing when checksum offloading is
    disabled, from Douglas Caetano dos Santos.

 2) Fix qdisc backlog updates in qfq and sfb schedulers, from Cong Wang.

 3) Route lookup flow key protocol value is wrong in ip6gre_xmit_other(),
    fix from Lance Richardson.

 4) Scheduling while atomic in multicast routing code of ipv4 and ipv6,
    fix from Nikolay Aleksandrov.

 5) Fix packet alignment in fec driver, from Eric Nelson.

 6) Fix perf regression in sctp due to struct layout and cache misses,
    from Xin Long.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  sctp: fix the issue sctp_diag uses lock_sock in rcu_read_lock
  sctp: change to check peer prsctp_capable when using prsctp polices
  sctp: remove prsctp_param from sctp_chunk
  sctp: move sent_count to the memory hole in sctp_chunk
  tg3: Avoid NULL pointer dereference in tg3_io_error_detected()
  act_ife: Fix false encoding
  act_ife: Fix external mac header on encode
  VSOCK: Don't dec ack backlog twice for rejected connections
  Revert "net: ethernet: bcmgenet: use phydev from struct net_device"
  net: fec: align IP header in hardware
  net: fec: remove QUIRK_HAS_RACC from i.mx27
  net: fec: remove QUIRK_HAS_RACC from i.mx25
  ipmr, ip6mr: fix scheduling while atomic and a deadlock with ipmr_get_route
  ip6_gre: fix flowi6_proto value in ip6gre_xmit_other()
  tcp: fix a compile error in DBGUNDO()
  tcp: fix wrong checksum calculation on MTU probing
  sch_sfb: keep backlog updated with qlen
  sch_qfq: keep backlog updated with qlen
  can: dev: fix deadlock reported after bus-off
commit bb6bbc7ca2
@@ -21,6 +21,7 @@
 #include <linux/slab.h>
 #include <linux/netdevice.h>
 #include <linux/if_arp.h>
+#include <linux/workqueue.h>
 #include <linux/can.h>
 #include <linux/can/dev.h>
 #include <linux/can/skb.h>
@@ -501,9 +502,8 @@ EXPORT_SYMBOL_GPL(can_free_echo_skb);
 /*
  * CAN device restart for bus-off recovery
  */
-static void can_restart(unsigned long data)
+static void can_restart(struct net_device *dev)
 {
-    struct net_device *dev = (struct net_device *)data;
     struct can_priv *priv = netdev_priv(dev);
     struct net_device_stats *stats = &dev->stats;
     struct sk_buff *skb;
@@ -543,6 +543,14 @@ restart:
         netdev_err(dev, "Error %d during restart", err);
 }
 
+static void can_restart_work(struct work_struct *work)
+{
+    struct delayed_work *dwork = to_delayed_work(work);
+    struct can_priv *priv = container_of(dwork, struct can_priv, restart_work);
+
+    can_restart(priv->dev);
+}
+
 int can_restart_now(struct net_device *dev)
 {
     struct can_priv *priv = netdev_priv(dev);
@@ -556,8 +564,8 @@ int can_restart_now(struct net_device *dev)
     if (priv->state != CAN_STATE_BUS_OFF)
         return -EBUSY;
 
-    /* Runs as soon as possible in the timer context */
-    mod_timer(&priv->restart_timer, jiffies);
+    cancel_delayed_work_sync(&priv->restart_work);
+    can_restart(dev);
 
     return 0;
 }
@@ -578,8 +586,8 @@ void can_bus_off(struct net_device *dev)
     netif_carrier_off(dev);
 
     if (priv->restart_ms)
-        mod_timer(&priv->restart_timer,
-                  jiffies + (priv->restart_ms * HZ) / 1000);
+        schedule_delayed_work(&priv->restart_work,
+                              msecs_to_jiffies(priv->restart_ms));
 }
 EXPORT_SYMBOL_GPL(can_bus_off);
 
@@ -688,6 +696,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
         return NULL;
 
     priv = netdev_priv(dev);
+    priv->dev = dev;
 
     if (echo_skb_max) {
         priv->echo_skb_max = echo_skb_max;
@@ -697,7 +706,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
 
     priv->state = CAN_STATE_STOPPED;
 
-    init_timer(&priv->restart_timer);
+    INIT_DELAYED_WORK(&priv->restart_work, can_restart_work);
 
     return dev;
 }
@@ -778,8 +787,6 @@ int open_candev(struct net_device *dev)
     if (!netif_carrier_ok(dev))
         netif_carrier_on(dev);
 
-    setup_timer(&priv->restart_timer, can_restart, (unsigned long)dev);
-
     return 0;
 }
 EXPORT_SYMBOL_GPL(open_candev);
@@ -794,7 +801,7 @@ void close_candev(struct net_device *dev)
 {
     struct can_priv *priv = netdev_priv(dev);
 
-    del_timer_sync(&priv->restart_timer);
+    cancel_delayed_work_sync(&priv->restart_work);
     can_flush_echo_skb(dev);
 }
 EXPORT_SYMBOL_GPL(close_candev);
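The hunks above are the whole of "can: dev: fix deadlock reported after bus-off": the bus-off restart moves from a timer (softirq context) to a delayed work item (process context). A minimal sketch of that conversion pattern, using a hypothetical foo_priv structure rather than the real can_priv, is:

/* Illustrative sketch only -- mirrors the timer -> delayed_work conversion
 * above; all foo_* names are made up for the example.
 */
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/jiffies.h>

struct foo_priv {
    struct net_device *dev;          /* back-pointer, set at alloc time */
    struct delayed_work restart_work;
    int restart_ms;
};

static void foo_restart(struct net_device *dev)
{
    /* runs in process context now, so it may sleep or take rtnl */
}

static void foo_restart_work(struct work_struct *work)
{
    struct delayed_work *dwork = to_delayed_work(work);
    struct foo_priv *priv = container_of(dwork, struct foo_priv, restart_work);

    foo_restart(priv->dev);
}

static void foo_init(struct foo_priv *priv, struct net_device *dev)
{
    priv->dev = dev;
    INIT_DELAYED_WORK(&priv->restart_work, foo_restart_work);
}

static void foo_bus_off(struct foo_priv *priv)
{
    if (priv->restart_ms)                        /* schedule instead of mod_timer() */
        schedule_delayed_work(&priv->restart_work,
                              msecs_to_jiffies(priv->restart_ms));
}

static void foo_close(struct foo_priv *priv)
{
    cancel_delayed_work_sync(&priv->restart_work);  /* ensure it is idle on teardown */
}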
@@ -453,25 +453,29 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
 static int bcmgenet_get_settings(struct net_device *dev,
                                  struct ethtool_cmd *cmd)
 {
+    struct bcmgenet_priv *priv = netdev_priv(dev);
+
     if (!netif_running(dev))
         return -EINVAL;
 
-    if (!dev->phydev)
+    if (!priv->phydev)
         return -ENODEV;
 
-    return phy_ethtool_gset(dev->phydev, cmd);
+    return phy_ethtool_gset(priv->phydev, cmd);
 }
 
 static int bcmgenet_set_settings(struct net_device *dev,
                                  struct ethtool_cmd *cmd)
 {
+    struct bcmgenet_priv *priv = netdev_priv(dev);
+
     if (!netif_running(dev))
         return -EINVAL;
 
-    if (!dev->phydev)
+    if (!priv->phydev)
         return -ENODEV;
 
-    return phy_ethtool_sset(dev->phydev, cmd);
+    return phy_ethtool_sset(priv->phydev, cmd);
 }
 
 static int bcmgenet_set_rx_csum(struct net_device *dev,
@@ -937,7 +941,7 @@ static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
     e->eee_active = p->eee_active;
     e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
 
-    return phy_ethtool_get_eee(dev->phydev, e);
+    return phy_ethtool_get_eee(priv->phydev, e);
 }
 
 static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
@@ -954,7 +958,7 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
     if (!p->eee_enabled) {
         bcmgenet_eee_enable_set(dev, false);
     } else {
-        ret = phy_init_eee(dev->phydev, 0);
+        ret = phy_init_eee(priv->phydev, 0);
         if (ret) {
             netif_err(priv, hw, dev, "EEE initialization failed\n");
             return ret;
@@ -964,12 +968,14 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
         bcmgenet_eee_enable_set(dev, true);
     }
 
-    return phy_ethtool_set_eee(dev->phydev, e);
+    return phy_ethtool_set_eee(priv->phydev, e);
 }
 
 static int bcmgenet_nway_reset(struct net_device *dev)
 {
-    return genphy_restart_aneg(dev->phydev);
+    struct bcmgenet_priv *priv = netdev_priv(dev);
+
+    return genphy_restart_aneg(priv->phydev);
 }
 
 /* standard ethtool support functions. */
@@ -996,13 +1002,12 @@ static struct ethtool_ops bcmgenet_ethtool_ops = {
 static int bcmgenet_power_down(struct bcmgenet_priv *priv,
                                enum bcmgenet_power_mode mode)
 {
-    struct net_device *ndev = priv->dev;
     int ret = 0;
     u32 reg;
 
     switch (mode) {
     case GENET_POWER_CABLE_SENSE:
-        phy_detach(ndev->phydev);
+        phy_detach(priv->phydev);
         break;
 
     case GENET_POWER_WOL_MAGIC:
@@ -1063,6 +1068,7 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
 /* ioctl handle special commands that are not present in ethtool. */
 static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
+    struct bcmgenet_priv *priv = netdev_priv(dev);
     int val = 0;
 
     if (!netif_running(dev))
@@ -1072,10 +1078,10 @@ static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
     case SIOCGMIIPHY:
     case SIOCGMIIREG:
     case SIOCSMIIREG:
-        if (!dev->phydev)
+        if (!priv->phydev)
             val = -ENODEV;
         else
-            val = phy_mii_ioctl(dev->phydev, rq, cmd);
+            val = phy_mii_ioctl(priv->phydev, rq, cmd);
         break;
 
     default:
@@ -2458,7 +2464,6 @@ static void bcmgenet_irq_task(struct work_struct *work)
 {
     struct bcmgenet_priv *priv = container_of(
             work, struct bcmgenet_priv, bcmgenet_irq_work);
-    struct net_device *ndev = priv->dev;
 
     netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
 
@@ -2471,7 +2476,7 @@ static void bcmgenet_irq_task(struct work_struct *work)
 
     /* Link UP/DOWN event */
     if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {
-        phy_mac_interrupt(ndev->phydev,
+        phy_mac_interrupt(priv->phydev,
                           !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
         priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
     }
@@ -2833,7 +2838,7 @@ static void bcmgenet_netif_start(struct net_device *dev)
     /* Monitor link interrupts now */
     bcmgenet_link_intr_enable(priv);
 
-    phy_start(dev->phydev);
+    phy_start(priv->phydev);
 }
 
 static int bcmgenet_open(struct net_device *dev)
@@ -2932,7 +2937,7 @@ static void bcmgenet_netif_stop(struct net_device *dev)
     struct bcmgenet_priv *priv = netdev_priv(dev);
 
     netif_tx_stop_all_queues(dev);
-    phy_stop(dev->phydev);
+    phy_stop(priv->phydev);
     bcmgenet_intr_disable(priv);
     bcmgenet_disable_rx_napi(priv);
     bcmgenet_disable_tx_napi(priv);
@@ -2958,7 +2963,7 @@ static int bcmgenet_close(struct net_device *dev)
     bcmgenet_netif_stop(dev);
 
     /* Really kill the PHY state machine and disconnect from it */
-    phy_disconnect(dev->phydev);
+    phy_disconnect(priv->phydev);
 
     /* Disable MAC receive */
     umac_enable_set(priv, CMD_RX_EN, false);
@@ -3517,7 +3522,7 @@ static int bcmgenet_suspend(struct device *d)
 
     bcmgenet_netif_stop(dev);
 
-    phy_suspend(dev->phydev);
+    phy_suspend(priv->phydev);
 
     netif_device_detach(dev);
 
@@ -3581,7 +3586,7 @@ static int bcmgenet_resume(struct device *d)
     if (priv->wolopts)
         clk_disable_unprepare(priv->clk_wol);
 
-    phy_init_hw(dev->phydev);
+    phy_init_hw(priv->phydev);
     /* Speed settings must be restored */
     bcmgenet_mii_config(priv->dev);
 
@@ -3614,7 +3619,7 @@ static int bcmgenet_resume(struct device *d)
 
     netif_device_attach(dev);
 
-    phy_resume(dev->phydev);
+    phy_resume(priv->phydev);
 
     if (priv->eee.eee_enabled)
         bcmgenet_eee_enable_set(dev, true);
@@ -597,6 +597,7 @@ struct bcmgenet_priv {
 
     /* MDIO bus variables */
     wait_queue_head_t wq;
+    struct phy_device *phydev;
     bool internal_phy;
     struct device_node *phy_dn;
     struct device_node *mdio_dn;
@@ -86,7 +86,7 @@ static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id,
 void bcmgenet_mii_setup(struct net_device *dev)
 {
     struct bcmgenet_priv *priv = netdev_priv(dev);
-    struct phy_device *phydev = dev->phydev;
+    struct phy_device *phydev = priv->phydev;
     u32 reg, cmd_bits = 0;
     bool status_changed = false;
 
@@ -183,9 +183,9 @@ void bcmgenet_mii_reset(struct net_device *dev)
     if (GENET_IS_V4(priv))
         return;
 
-    if (dev->phydev) {
-        phy_init_hw(dev->phydev);
-        phy_start_aneg(dev->phydev);
+    if (priv->phydev) {
+        phy_init_hw(priv->phydev);
+        phy_start_aneg(priv->phydev);
     }
 }
 
@@ -236,7 +236,6 @@ static void bcmgenet_internal_phy_setup(struct net_device *dev)
 
 static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
 {
-    struct net_device *ndev = priv->dev;
     u32 reg;
 
     /* Speed settings are set in bcmgenet_mii_setup() */
@@ -245,14 +244,14 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
     bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
 
     if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
-        fixed_phy_set_link_update(ndev->phydev,
+        fixed_phy_set_link_update(priv->phydev,
                                   bcmgenet_fixed_phy_link_update);
 }
 
 int bcmgenet_mii_config(struct net_device *dev)
 {
     struct bcmgenet_priv *priv = netdev_priv(dev);
-    struct phy_device *phydev = dev->phydev;
+    struct phy_device *phydev = priv->phydev;
     struct device *kdev = &priv->pdev->dev;
     const char *phy_name = NULL;
     u32 id_mode_dis = 0;
@@ -303,7 +302,7 @@ int bcmgenet_mii_config(struct net_device *dev)
      * capabilities, use that knowledge to also configure the
      * Reverse MII interface correctly.
      */
-    if ((phydev->supported & PHY_BASIC_FEATURES) ==
+    if ((priv->phydev->supported & PHY_BASIC_FEATURES) ==
             PHY_BASIC_FEATURES)
         port_ctrl = PORT_MODE_EXT_RVMII_25;
     else
@@ -372,7 +371,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
             return -ENODEV;
         }
     } else {
-        phydev = dev->phydev;
+        phydev = priv->phydev;
         phydev->dev_flags = phy_flags;
 
         ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
@@ -383,6 +382,8 @@ int bcmgenet_mii_probe(struct net_device *dev)
         }
     }
 
+    priv->phydev = phydev;
+
     /* Configure port multiplexer based on what the probed PHY device since
      * reading the 'max-speed' property determines the maximum supported
      * PHY speed which is needed for bcmgenet_mii_config() to configure
@@ -390,7 +391,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
      */
     ret = bcmgenet_mii_config(dev);
     if (ret) {
-        phy_disconnect(phydev);
+        phy_disconnect(priv->phydev);
         return ret;
     }
 
@@ -400,7 +401,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
      * Ethernet MAC ISRs
      */
     if (priv->internal_phy)
-        phydev->irq = PHY_IGNORE_INTERRUPT;
+        priv->phydev->irq = PHY_IGNORE_INTERRUPT;
 
     return 0;
 }
@@ -605,6 +606,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
 
     }
 
+    priv->phydev = phydev;
     priv->phy_interface = pd->phy_interface;
 
     return 0;
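All of the GENET hunks above belong to one revert: the driver goes back to caching its PHY device pointer in its own private structure instead of reading dev->phydev. A rough sketch of that caching pattern, with hypothetical foo_* names (not the bcmgenet code itself), looks like:

/* Illustrative only: driver keeps its own phydev pointer in priv and uses it
 * everywhere after connecting.  All foo_* identifiers are invented.
 */
#include <linux/phy.h>
#include <linux/netdevice.h>

struct foo_priv {
    struct net_device *dev;
    struct phy_device *phydev;
};

static int foo_mii_probe(struct net_device *dev, struct phy_device *phydev,
                         void (*adjust_link)(struct net_device *))
{
    struct foo_priv *priv = netdev_priv(dev);
    int ret;

    ret = phy_connect_direct(dev, phydev, adjust_link,
                             PHY_INTERFACE_MODE_MII);
    if (ret)
        return ret;

    priv->phydev = phydev;      /* cached copy used by the rest of the driver */
    return 0;
}

static int foo_nway_reset(struct net_device *dev)
{
    struct foo_priv *priv = netdev_priv(dev);

    if (!priv->phydev)
        return -ENODEV;

    return genphy_restart_aneg(priv->phydev);
}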
@@ -18122,14 +18122,14 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
 
     rtnl_lock();
 
-    /* We needn't recover from permanent error */
-    if (state == pci_channel_io_frozen)
-        tp->pcierr_recovery = true;
-
     /* We probably don't have netdev yet */
     if (!netdev || !netif_running(netdev))
         goto done;
 
+    /* We needn't recover from permanent error */
+    if (state == pci_channel_io_frozen)
+        tp->pcierr_recovery = true;
+
     tg3_phy_stop(tp);
 
     tg3_netif_stop(tp);
@@ -18226,7 +18226,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
 
     rtnl_lock();
 
-    if (!netif_running(netdev))
+    if (!netdev || !netif_running(netdev))
         goto done;
 
     tg3_full_lock(tp, 0);
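The tg3 change is purely an ordering fix: nothing derived from the netdev, in particular the private struct obtained through netdev_priv(), may be touched before the NULL check runs. A hedged sketch of the hazard, with a hypothetical error handler and a made-up `recovering` flag:

/* Illustrative sketch only; foo_priv and its field are invented.
 * netdev_priv(NULL) yields a bogus pointer, so any state hanging off the
 * netdev must only be written after the NULL check.
 */
#include <linux/pci.h>
#include <linux/netdevice.h>

struct foo_priv {
    bool recovering;
};

static pci_ers_result_t foo_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
    struct net_device *netdev = pci_get_drvdata(pdev);
    struct foo_priv *priv;

    if (!netdev || !netif_running(netdev))
        return PCI_ERS_RESULT_NEED_RESET;   /* nothing to quiesce yet */

    priv = netdev_priv(netdev);             /* safe: netdev checked above */
    if (state == pci_channel_io_frozen)
        priv->recovering = true;

    return PCI_ERS_RESULT_NEED_RESET;
}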
@@ -89,10 +89,10 @@ static struct platform_device_id fec_devtype[] = {
         .driver_data = 0,
     }, {
         .name = "imx25-fec",
-        .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_HAS_RACC,
+        .driver_data = FEC_QUIRK_USE_GASKET,
     }, {
         .name = "imx27-fec",
-        .driver_data = FEC_QUIRK_HAS_RACC,
+        .driver_data = 0,
     }, {
         .name = "imx28-fec",
         .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
@@ -180,6 +180,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 /* FEC receive acceleration */
 #define FEC_RACC_IPDIS      (1 << 1)
 #define FEC_RACC_PRODIS     (1 << 2)
+#define FEC_RACC_SHIFT16    BIT(7)
 #define FEC_RACC_OPTIONS    (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
 
 /*
@@ -945,9 +946,11 @@ fec_restart(struct net_device *ndev)
 
 #if !defined(CONFIG_M5272)
     if (fep->quirks & FEC_QUIRK_HAS_RACC) {
-        /* set RX checksum */
         val = readl(fep->hwp + FEC_RACC);
+        /* align IP header */
+        val |= FEC_RACC_SHIFT16;
         if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
+            /* set RX checksum */
             val |= FEC_RACC_OPTIONS;
         else
             val &= ~FEC_RACC_OPTIONS;
@@ -1428,6 +1431,12 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
         prefetch(skb->data - NET_IP_ALIGN);
         skb_put(skb, pkt_len - 4);
         data = skb->data;
+
+#if !defined(CONFIG_M5272)
+        if (fep->quirks & FEC_QUIRK_HAS_RACC)
+            data = skb_pull_inline(skb, 2);
+#endif
+
         if (!is_copybreak && need_swap)
             swap_buffer(data, pkt_len);
 
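The FEC change programs the receive accelerator to prepend two pad bytes (FEC_RACC_SHIFT16) so the IP header lands on a 4-byte boundary, and the rx path then drops those two bytes with skb_pull_inline(). A tiny, compilable illustration of the alignment arithmetic (plain user-space C, nothing kernel-specific):

/* With no pad, the IP header follows the 14-byte Ethernet header at offset
 * 14, which is not 4-byte aligned; a 2-byte pad moves it to offset 16.
 */
#include <stdio.h>

#define ETH_HLEN 14

int main(void)
{
    unsigned int pad;

    for (pad = 0; pad <= 2; pad += 2)
        printf("pad %u: IP header offset %u -> %saligned\n",
               pad, pad + ETH_HLEN,
               ((pad + ETH_HLEN) % 4) ? "mis" : "");
    return 0;
}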
@@ -32,6 +32,7 @@ enum can_mode {
  * CAN common private data
  */
 struct can_priv {
+    struct net_device *dev;
     struct can_device_stats can_stats;
 
     struct can_bittiming bittiming, data_bittiming;
@@ -47,7 +48,7 @@ struct can_priv {
     u32 ctrlmode_static;    /* static enabled options for driver/hardware */
 
     int restart_ms;
-    struct timer_list restart_timer;
+    struct delayed_work restart_work;
 
     int (*do_set_bittiming)(struct net_device *dev);
     int (*do_set_data_bittiming)(struct net_device *dev);
@@ -120,5 +120,5 @@ struct mfc_cache {
 struct rtmsg;
 int ipmr_get_route(struct net *net, struct sk_buff *skb,
                    __be32 saddr, __be32 daddr,
-                   struct rtmsg *rtm, int nowait);
+                   struct rtmsg *rtm, int nowait, u32 portid);
 #endif
@@ -116,7 +116,7 @@ struct mfc6_cache {
 
 struct rtmsg;
 extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
-                           struct rtmsg *rtm, int nowait);
+                           struct rtmsg *rtm, int nowait, u32 portid);
 
 #ifdef CONFIG_IPV6_MROUTE
 extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb);
@@ -554,6 +554,9 @@ struct sctp_chunk {
 
     atomic_t refcnt;
 
+    /* How many times this chunk have been sent, for prsctp RTX policy */
+    int sent_count;
+
     /* This is our link to the per-transport transmitted list. */
     struct list_head transmitted_list;
 
@@ -603,16 +606,6 @@ struct sctp_chunk {
     /* This needs to be recoverable for SCTP_SEND_FAILED events. */
     struct sctp_sndrcvinfo sinfo;
 
-    /* We use this field to record param for prsctp policies,
-     * for TTL policy, it is the time_to_drop of this chunk,
-     * for RTX policy, it is the max_sent_count of this chunk,
-     * for PRIO policy, it is the priority of this chunk.
-     */
-    unsigned long prsctp_param;
-
-    /* How many times this chunk have been sent, for prsctp RTX policy */
-    int sent_count;
-
     /* Which association does this belong to? */
     struct sctp_association *asoc;
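Moving sent_count next to refcnt works because, on 64-bit builds, a 4-byte member followed by an 8-byte-aligned member leaves a 4-byte padding hole; placing an int there costs no extra space. A small stand-alone C program (illustrative layout only, not the real sctp_chunk, and using a plain int where the kernel has an atomic_t) shows the effect:

#include <stdio.h>
#include <stddef.h>

struct with_hole {
    int refcnt;                 /* 4 bytes */
    /* 4 bytes of padding here on LP64 */
    struct with_hole *next;     /* 8-byte aligned */
};

struct hole_filled {
    int refcnt;
    int sent_count;             /* reuses the former padding */
    struct hole_filled *next;
};

int main(void)
{
    printf("with_hole:   %zu bytes\n", sizeof(struct with_hole));
    printf("hole_filled: %zu bytes (sent_count at offset %zu)\n",
           sizeof(struct hole_filled),
           offsetof(struct hole_filled, sent_count));
    return 0;
}

On a typical LP64 target both structs are 16 bytes, so the extra counter is free in terms of size and sits in the same cache line as the refcount.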
@@ -2123,7 +2123,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
 
 int ipmr_get_route(struct net *net, struct sk_buff *skb,
                    __be32 saddr, __be32 daddr,
-                   struct rtmsg *rtm, int nowait)
+                   struct rtmsg *rtm, int nowait, u32 portid)
 {
     struct mfc_cache *cache;
     struct mr_table *mrt;
@@ -2168,6 +2168,7 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
             return -ENOMEM;
         }
 
+        NETLINK_CB(skb2).portid = portid;
         skb_push(skb2, sizeof(struct iphdr));
         skb_reset_network_header(skb2);
         iph = ip_hdr(skb2);
@@ -2503,7 +2503,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
             IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
             int err = ipmr_get_route(net, skb,
                                      fl4->saddr, fl4->daddr,
-                                     r, nowait);
+                                     r, nowait, portid);
+
             if (err <= 0) {
                 if (!nowait) {
                     if (err == 0)
@@ -2329,10 +2329,9 @@ static void DBGUNDO(struct sock *sk, const char *msg)
     }
 #if IS_ENABLED(CONFIG_IPV6)
     else if (sk->sk_family == AF_INET6) {
-        struct ipv6_pinfo *np = inet6_sk(sk);
         pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
                  msg,
-                 &np->daddr, ntohs(inet->inet_dport),
+                 &sk->sk_v6_daddr, ntohs(inet->inet_dport),
                  tp->snd_cwnd, tcp_left_out(tp),
                  tp->snd_ssthresh, tp->prior_ssthresh,
                  tp->packets_out);
@@ -1966,12 +1966,14 @@ static int tcp_mtu_probe(struct sock *sk)
     len = 0;
     tcp_for_write_queue_from_safe(skb, next, sk) {
         copy = min_t(int, skb->len, probe_size - len);
-        if (nskb->ip_summed)
+        if (nskb->ip_summed) {
             skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
-        else
-            nskb->csum = skb_copy_and_csum_bits(skb, 0,
-                                                skb_put(nskb, copy),
-                                                copy, nskb->csum);
+        } else {
+            __wsum csum = skb_copy_and_csum_bits(skb, 0,
+                                                 skb_put(nskb, copy),
+                                                 copy, 0);
+            nskb->csum = csum_block_add(nskb->csum, csum, len);
+        }
 
         if (skb->len <= copy) {
             /* We've eaten all the data from this skb.
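The MTU-probe fix computes each copied fragment's checksum from zero and folds it into the probe skb's running checksum at the fragment's byte offset, which is what csum_block_add() does; a fragment that starts at an odd offset has to be byte-swapped before being added, otherwise the final checksum is wrong. A simplified user-space illustration of that property (big-endian word assembly, invented helper names, not the kernel implementation):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* 16-bit one's-complement sum of a byte buffer */
static uint32_t csum_partial(const uint8_t *p, size_t len, uint32_t sum)
{
    size_t i;

    for (i = 0; i + 1 < len; i += 2)
        sum += (uint32_t)p[i] << 8 | p[i + 1];
    if (len & 1)
        sum += (uint32_t)p[len - 1] << 8;
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return sum;
}

/* fold a block's own sum into a running sum at byte offset 'off' */
static uint32_t csum_block_add(uint32_t sum, uint32_t block, size_t off)
{
    if (off & 1)    /* odd offset: the block's bytes change word position */
        block = ((block & 0xff) << 8) | (block >> 8);
    sum += block;
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return sum;
}

int main(void)
{
    uint8_t data[11] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };
    uint32_t whole = csum_partial(data, sizeof(data), 0);
    uint32_t a = csum_partial(data, 5, 0);          /* first block  */
    uint32_t b = csum_partial(data + 5, 6, 0);      /* second block */
    uint32_t combined = csum_block_add(a, b, 5);    /* offset 5 is odd */

    printf("whole=0x%04x combined=0x%04x\n", whole, combined);
    return 0;
}

Both values print as the same sum, which is the identity the fixed code relies on; the original code instead passed the running csum as the seed of skb_copy_and_csum_bits(), losing the offset information.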
@@ -648,7 +648,6 @@ static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
         encap_limit = t->parms.encap_limit;
 
     memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
-    fl6.flowi6_proto = skb->protocol;
 
     err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
     if (err)
@@ -2285,8 +2285,8 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
     return 1;
 }
 
-int ip6mr_get_route(struct net *net,
-                    struct sk_buff *skb, struct rtmsg *rtm, int nowait)
+int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
+                    int nowait, u32 portid)
 {
     int err;
     struct mr6_table *mrt;
@@ -2331,6 +2331,7 @@ int ip6mr_get_route(struct net *net,
             return -ENOMEM;
         }
 
+        NETLINK_CB(skb2).portid = portid;
         skb_reset_transport_header(skb2);
 
         skb_put(skb2, sizeof(struct ipv6hdr));
@@ -3202,7 +3202,9 @@ static int rt6_fill_node(struct net *net,
     if (iif) {
 #ifdef CONFIG_IPV6_MROUTE
         if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
-            int err = ip6mr_get_route(net, skb, rtm, nowait);
+            int err = ip6mr_get_route(net, skb, rtm, nowait,
+                                      portid);
+
             if (err <= 0) {
                 if (!nowait) {
                     if (err == 0)
@@ -53,7 +53,7 @@ int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval)
     u32 *tlv = (u32 *)(skbdata);
     u16 totlen = nla_total_size(dlen);  /*alignment + hdr */
     char *dptr = (char *)tlv + NLA_HDRLEN;
-    u32 htlv = attrtype << 16 | dlen;
+    u32 htlv = attrtype << 16 | (dlen + NLA_HDRLEN);
 
     *tlv = htonl(htlv);
     memset(dptr, 0, totlen - NLA_HDRLEN);
@@ -627,7 +627,7 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
     struct tcf_ife_info *ife = to_ife(a);
     int action = ife->tcf_action;
     struct ifeheadr *ifehdr = (struct ifeheadr *)skb->data;
-    u16 ifehdrln = ifehdr->metalen;
+    int ifehdrln = (int)ifehdr->metalen;
     struct meta_tlvhdr *tlv = (struct meta_tlvhdr *)(ifehdr->tlv_data);
 
     spin_lock(&ife->tcf_lock);
@@ -740,8 +740,6 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
         return TC_ACT_SHOT;
     }
 
-    iethh = eth_hdr(skb);
-
     err = skb_cow_head(skb, hdrm);
     if (unlikely(err)) {
         ife->tcf_qstats.drops++;
@@ -752,6 +750,7 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
     if (!(at & AT_EGRESS))
         skb_push(skb, skb->dev->hard_header_len);
 
+    iethh = (struct ethhdr *)skb->data;
     __skb_push(skb, hdrm);
     memcpy(skb->data, iethh, skb->mac_len);
     skb_reset_mac_header(skb);
@@ -1153,6 +1153,7 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
     if (!skb)
         return NULL;
 
+    qdisc_qstats_backlog_dec(sch, skb);
     sch->q.qlen--;
     qdisc_bstats_update(sch, skb);
 
@@ -1256,6 +1257,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
     }
 
     bstats_update(&cl->bstats, skb);
+    qdisc_qstats_backlog_inc(sch, skb);
     ++sch->q.qlen;
 
     agg = cl->agg;
@@ -1476,6 +1478,7 @@ static void qfq_reset_qdisc(struct Qdisc *sch)
             qdisc_reset(cl->qdisc);
         }
     }
+    sch->qstats.backlog = 0;
     sch->q.qlen = 0;
 }
 
@@ -400,6 +400,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 enqueue:
     ret = qdisc_enqueue(skb, child, to_free);
     if (likely(ret == NET_XMIT_SUCCESS)) {
+        qdisc_qstats_backlog_inc(sch, skb);
         sch->q.qlen++;
         increment_qlen(skb, q);
     } else if (net_xmit_drop_count(ret)) {
@@ -428,6 +429,7 @@ static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
 
     if (skb) {
         qdisc_bstats_update(sch, skb);
+        qdisc_qstats_backlog_dec(sch, skb);
         sch->q.qlen--;
         decrement_qlen(skb, q);
     }
@@ -450,6 +452,7 @@ static void sfb_reset(struct Qdisc *sch)
     struct sfb_sched_data *q = qdisc_priv(sch);
 
     qdisc_reset(q->qdisc);
+    sch->qstats.backlog = 0;
     sch->q.qlen = 0;
     q->slot = 0;
     q->double_buffering = false;
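Both scheduler fixes above restore the same invariant: sch->q.qlen (packets) and sch->qstats.backlog (bytes) must change together on enqueue, dequeue and reset, otherwise the byte backlog drifts out of sync with the queue length. A hedged sketch of that invariant with a hypothetical qdisc (foo_* names are invented; only the helper calls are real API):

#include <net/sch_generic.h>
#include <net/pkt_sched.h>

struct foo_sched_data {
    struct Qdisc *qdisc;        /* child qdisc, set up elsewhere (hypothetical) */
};

static int foo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                       struct sk_buff **to_free)
{
    struct foo_sched_data *q = qdisc_priv(sch);
    int ret = qdisc_enqueue(skb, q->qdisc, to_free);

    if (ret == NET_XMIT_SUCCESS) {
        qdisc_qstats_backlog_inc(sch, skb);     /* bytes   */
        sch->q.qlen++;                          /* packets */
    } else if (net_xmit_drop_count(ret)) {
        qdisc_qstats_drop(sch);
    }
    return ret;
}

static struct sk_buff *foo_dequeue(struct Qdisc *sch)
{
    struct foo_sched_data *q = qdisc_priv(sch);
    struct sk_buff *skb = q->qdisc->dequeue(q->qdisc);

    if (skb) {
        qdisc_bstats_update(sch, skb);
        qdisc_qstats_backlog_dec(sch, skb);
        sch->q.qlen--;
    }
    return skb;
}

static void foo_reset(struct Qdisc *sch)
{
    struct foo_sched_data *q = qdisc_priv(sch);

    qdisc_reset(q->qdisc);
    sch->qstats.backlog = 0;    /* both counters go to zero together */
    sch->q.qlen = 0;
}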
@@ -179,6 +179,11 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
              msg, msg->expires_at, jiffies);
     }
 
+    if (asoc->peer.prsctp_capable &&
+        SCTP_PR_TTL_ENABLED(sinfo->sinfo_flags))
+        msg->expires_at =
+            jiffies + msecs_to_jiffies(sinfo->sinfo_timetolive);
+
     /* This is the biggest possible DATA chunk that can fit into
      * the packet
      */
@@ -335,7 +340,7 @@ errout:
 /* Check whether this message has expired. */
 int sctp_chunk_abandoned(struct sctp_chunk *chunk)
 {
-    if (!chunk->asoc->prsctp_enable ||
+    if (!chunk->asoc->peer.prsctp_capable ||
         !SCTP_PR_POLICY(chunk->sinfo.sinfo_flags)) {
         struct sctp_datamsg *msg = chunk->msg;
 
@@ -349,14 +354,14 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk)
     }
 
     if (SCTP_PR_TTL_ENABLED(chunk->sinfo.sinfo_flags) &&
-        time_after(jiffies, chunk->prsctp_param)) {
+        time_after(jiffies, chunk->msg->expires_at)) {
         if (chunk->sent_count)
             chunk->asoc->abandoned_sent[SCTP_PR_INDEX(TTL)]++;
         else
             chunk->asoc->abandoned_unsent[SCTP_PR_INDEX(TTL)]++;
         return 1;
     } else if (SCTP_PR_RTX_ENABLED(chunk->sinfo.sinfo_flags) &&
-               chunk->sent_count > chunk->prsctp_param) {
+               chunk->sent_count > chunk->sinfo.sinfo_timetolive) {
         chunk->asoc->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
         return 1;
     }
@@ -326,7 +326,7 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
 
         sctp_chunk_hold(chunk);
         sctp_outq_tail_data(q, chunk);
-        if (chunk->asoc->prsctp_enable &&
+        if (chunk->asoc->peer.prsctp_capable &&
             SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
             chunk->asoc->sent_cnt_removable++;
         if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
@@ -383,7 +383,7 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
 
     list_for_each_entry_safe(chk, temp, queue, transmitted_list) {
         if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
-            chk->prsctp_param <= sinfo->sinfo_timetolive)
+            chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
             continue;
 
         list_del_init(&chk->transmitted_list);
@@ -418,7 +418,7 @@ static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
 
     list_for_each_entry_safe(chk, temp, queue, list) {
         if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
-            chk->prsctp_param <= sinfo->sinfo_timetolive)
+            chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
             continue;
 
         list_del_init(&chk->list);
@@ -442,7 +442,7 @@ void sctp_prsctp_prune(struct sctp_association *asoc,
 {
     struct sctp_transport *transport;
 
-    if (!asoc->prsctp_enable || !asoc->sent_cnt_removable)
+    if (!asoc->peer.prsctp_capable || !asoc->sent_cnt_removable)
         return;
 
     msg_len = sctp_prsctp_prune_sent(asoc, sinfo,
@@ -1055,7 +1055,7 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 
                 /* Mark as failed send. */
                 sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
-                if (asoc->prsctp_enable &&
+                if (asoc->peer.prsctp_capable &&
                     SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
                     asoc->sent_cnt_removable--;
                 sctp_chunk_free(chunk);
@@ -1347,7 +1347,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
             tsn = ntohl(tchunk->subh.data_hdr->tsn);
             if (TSN_lte(tsn, ctsn)) {
                 list_del_init(&tchunk->transmitted_list);
-                if (asoc->prsctp_enable &&
+                if (asoc->peer.prsctp_capable &&
                     SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
                     asoc->sent_cnt_removable--;
                 sctp_chunk_free(tchunk);
@@ -272,28 +272,17 @@ out:
     return err;
 }
 
-static int sctp_tsp_dump(struct sctp_transport *tsp, void *p)
+static int sctp_sock_dump(struct sock *sk, void *p)
 {
-    struct sctp_endpoint *ep = tsp->asoc->ep;
+    struct sctp_endpoint *ep = sctp_sk(sk)->ep;
     struct sctp_comm_param *commp = p;
-    struct sock *sk = ep->base.sk;
     struct sk_buff *skb = commp->skb;
     struct netlink_callback *cb = commp->cb;
     const struct inet_diag_req_v2 *r = commp->r;
-    struct sctp_association *assoc =
-        list_entry(ep->asocs.next, struct sctp_association, asocs);
+    struct sctp_association *assoc;
     int err = 0;
 
-    /* find the ep only once through the transports by this condition */
-    if (tsp->asoc != assoc)
-        goto out;
-
-    if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
-        goto out;
-
     lock_sock(sk);
-    if (sk != assoc->base.sk)
-        goto release;
     list_for_each_entry(assoc, &ep->asocs, asocs) {
         if (cb->args[4] < cb->args[1])
             goto next;
@@ -312,7 +301,7 @@ static int sctp_tsp_dump(struct sctp_transport *tsp, void *p)
                     cb->nlh->nlmsg_seq,
                     NLM_F_MULTI, cb->nlh) < 0) {
             cb->args[3] = 1;
-            err = 2;
+            err = 1;
             goto release;
         }
         cb->args[3] = 1;
@@ -321,7 +310,7 @@ static int sctp_tsp_dump(struct sctp_transport *tsp, void *p)
                        sk_user_ns(NETLINK_CB(cb->skb).sk),
                        NETLINK_CB(cb->skb).portid,
                        cb->nlh->nlmsg_seq, 0, cb->nlh) < 0) {
-            err = 2;
+            err = 1;
             goto release;
         }
     next:
@@ -333,10 +322,35 @@ next:
         cb->args[4] = 0;
 release:
     release_sock(sk);
+    sock_put(sk);
     return err;
 }
 
+static int sctp_get_sock(struct sctp_transport *tsp, void *p)
+{
+    struct sctp_endpoint *ep = tsp->asoc->ep;
+    struct sctp_comm_param *commp = p;
+    struct sock *sk = ep->base.sk;
+    struct netlink_callback *cb = commp->cb;
+    const struct inet_diag_req_v2 *r = commp->r;
+    struct sctp_association *assoc =
+        list_entry(ep->asocs.next, struct sctp_association, asocs);
+
+    /* find the ep only once through the transports by this condition */
+    if (tsp->asoc != assoc)
+        goto out;
+
+    if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
+        goto out;
+
+    sock_hold(sk);
+    cb->args[5] = (long)sk;
+
+    return 1;
+
 out:
     cb->args[2]++;
-    return err;
+    return 0;
 }
 
 static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
@@ -472,10 +486,18 @@ skip:
      * 2 : to record the transport pos of this time's traversal
      * 3 : to mark if we have dumped the ep info of the current asoc
     * 4 : to work as a temporary variable to traversal list
+     * 5 : to save the sk we get from travelsing the tsp list.
      */
     if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
         goto done;
-    sctp_for_each_transport(sctp_tsp_dump, net, cb->args[2], &commp);
+
+next:
+    cb->args[5] = 0;
+    sctp_for_each_transport(sctp_get_sock, net, cb->args[2], &commp);
+
+    if (cb->args[5] && !sctp_sock_dump((struct sock *)cb->args[5], &commp))
+        goto next;
+
 done:
     cb->args[1] = cb->args[4];
     cb->args[4] = 0;
@@ -706,20 +706,6 @@ nodata:
     return retval;
 }
 
-static void sctp_set_prsctp_policy(struct sctp_chunk *chunk,
-                                   const struct sctp_sndrcvinfo *sinfo)
-{
-    if (!chunk->asoc->prsctp_enable)
-        return;
-
-    if (SCTP_PR_TTL_ENABLED(sinfo->sinfo_flags))
-        chunk->prsctp_param =
-            jiffies + msecs_to_jiffies(sinfo->sinfo_timetolive);
-    else if (SCTP_PR_RTX_ENABLED(sinfo->sinfo_flags) ||
-             SCTP_PR_PRIO_ENABLED(sinfo->sinfo_flags))
-        chunk->prsctp_param = sinfo->sinfo_timetolive;
-}
-
 /* Make a DATA chunk for the given association from the provided
  * parameters.  However, do not populate the data payload.
  */
@@ -753,7 +739,6 @@ struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc,
 
     retval->subh.data_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
     memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));
-    sctp_set_prsctp_policy(retval, sinfo);
 
 nodata:
     return retval;
@@ -4469,17 +4469,21 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
                   const union sctp_addr *paddr, void *p)
 {
     struct sctp_transport *transport;
-    int err = 0;
+    int err = -ENOENT;
 
     rcu_read_lock();
     transport = sctp_addrs_lookup_transport(net, laddr, paddr);
     if (!transport || !sctp_transport_hold(transport))
         goto out;
-    err = cb(transport, p);
+
+    sctp_association_hold(transport->asoc);
     sctp_transport_put(transport);
 
-out:
     rcu_read_unlock();
+    err = cb(transport, p);
+    sctp_association_put(transport->asoc);
+
+out:
     return err;
 }
 EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
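The sctp_diag and socket.c changes follow one general rule: lock_sock() can sleep, so it must not be called inside rcu_read_lock(); instead the RCU walk only pins the object with a reference, the read-side section is left, and only then is the sleeping lock taken. A minimal sketch of that pattern with hypothetical foo_* helpers (the real code pins the transport and association as shown above):

#include <net/sock.h>
#include <linux/rcupdate.h>

/* foo_lookup() and foo_fill_info() are invented placeholders. */
static struct sock *foo_find_and_hold(struct net *net)
{
    struct sock *sk;

    rcu_read_lock();
    sk = foo_lookup(net);       /* RCU-protected lookup, hypothetical */
    if (sk)
        sock_hold(sk);          /* pin it before leaving the section;
                                 * real code must ensure the ref is still
                                 * takeable, as sctp does via transport hold */
    rcu_read_unlock();

    return sk;
}

static int foo_dump_one(struct net *net, void *arg)
{
    struct sock *sk = foo_find_and_hold(net);
    int err = -ENOENT;

    if (!sk)
        return err;

    lock_sock(sk);              /* safe: we are out of the RCU section */
    err = foo_fill_info(sk, arg);
    release_sock(sk);
    sock_put(sk);

    return err;
}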
@@ -465,6 +465,8 @@ void vsock_pending_work(struct work_struct *work)
 
     if (vsock_is_pending(sk)) {
         vsock_remove_pending(listener, sk);
+
+        listener->sk_ack_backlog--;
     } else if (!vsk->rejected) {
         /* We are not on the pending list and accept() did not reject
          * us, so we must have been accepted by our user process.  We
@@ -475,8 +477,6 @@ void vsock_pending_work(struct work_struct *work)
         goto out;
     }
 
-    listener->sk_ack_backlog--;
-
     /* We need to remove ourself from the global connected sockets list so
      * incoming packets can't find this socket, and to reduce the reference
      * count.
@@ -2010,5 +2010,5 @@ EXPORT_SYMBOL_GPL(vsock_core_get_transport);
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMware Virtual Socket Family");
-MODULE_VERSION("1.0.1.0-k");
+MODULE_VERSION("1.0.2.0-k");
 MODULE_LICENSE("GPL v2");