Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
 "The pull requests are getting smaller, that's progress I suppose :-)

  1) Fix infinite loop in CIPSO option parsing, from Yujuan Qi.

  2) Fix remote checksum handling in VXLAN and GUE tunneling drivers,
     from Koichiro Den.

  3) Missing u64_stats_init() calls in several drivers, from Florian
     Fainelli.

  4) TCP can set the congestion window to an invalid ssthresh value
     after congestion window reductions, from Yuchung Cheng.

  5) Fix BPF jit branch generation on s390, from Daniel Borkmann.

  6) Correct MIPS ebpf JIT merge, from David Daney.

  7) Correct byte order test in BPF test_verifier.c, from Daniel
     Borkmann.

  8) Fix various crashes and leaks in ASIX driver, from Dean Jenkins.

  9) Handle SCTP checksums properly in mlx4 driver, from Davide
     Caratti.

 10) We can potentially enter tcp_connect() with a cached route
     already, due to fastopen, so we have to explicitly invalidate it.

 11) skb_warn_bad_offload() can bark in legitimate situations, fix from
     Willem de Bruijn"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (52 commits)
  net: avoid skb_warn_bad_offload false positives on UFO
  qmi_wwan: fix NULL deref on disconnect
  ppp: fix xmit recursion detection on ppp channels
  rds: Reintroduce statistics counting
  tcp: fastopen: tcp_connect() must refresh the route
  net: sched: set xt_tgchk_param par.net properly in ipt_init_target
  net: dsa: mediatek: add adjust link support for user ports
  net/mlx4_en: don't set CHECKSUM_COMPLETE on SCTP packets
  qed: Fix a memory allocation failure test in 'qed_mcp_cmd_init()'
  hysdn: fix to a race condition in put_log_buffer
  s390/qeth: fix L3 next-hop in xmit qeth hdr
  asix: Fix small memory leak in ax88772_unbind()
  asix: Ensure asix_rx_fixup_info members are all reset
  asix: Add rx->ax_skb = NULL after usbnet_skb_return()
  bpf: fix selftest/bpf/test_pkt_md_access on s390x
  netvsc: fix race on sub channel creation
  bpf: fix byte order test in test_verifier
  xgene: Always get clk source, but ignore if it's missing for SGMII ports
  MIPS: Add missing file for eBPF JIT.
  bpf, s390: fix build for libbpf and selftest suite
  ...
This commit is contained in:
commit 4530cca198

arch/mips/net/ebpf_jit.c (new file, 1950 lines)
File diff suppressed because it is too large
@@ -1253,7 +1253,8 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
                insn_count = bpf_jit_insn(jit, fp, i);
                if (insn_count < 0)
                        return -1;
                jit->addrs[i + 1] = jit->prg; /* Next instruction address */
                /* Next instruction address */
                jit->addrs[i + insn_count] = jit->prg;
        }
        bpf_jit_epilogue(jit);
@@ -44,7 +44,6 @@ struct procdata {
        char log_name[15];              /* log filename */
        struct log_data *log_head, *log_tail;   /* head and tail for queue */
        int if_used;                    /* open count for interface */
        int volatile del_lock;          /* lock for delete operations */
        unsigned char logtmp[LOG_MAX_LINELEN];
        wait_queue_head_t rd_queue;
};
@@ -102,7 +101,6 @@ put_log_buffer(hysdn_card *card, char *cp)
{
        struct log_data *ib;
        struct procdata *pd = card->proclog;
        int i;
        unsigned long flags;

        if (!pd)
@@ -126,21 +124,21 @@ put_log_buffer(hysdn_card *card, char *cp)
        else
                pd->log_tail->next = ib;        /* follows existing messages */
        pd->log_tail = ib;                      /* new tail */
        i = pd->del_lock++;                     /* get lock state */
        spin_unlock_irqrestore(&card->hysdn_lock, flags);

        /* delete old entrys */
        if (!i)
                while (pd->log_head->next) {
                        if ((pd->log_head->usage_cnt <= 0) &&
                            (pd->log_head->next->usage_cnt <= 0)) {
                                ib = pd->log_head;
                                pd->log_head = pd->log_head->next;
                                kfree(ib);
                        } else
                                break;
                }                       /* pd->log_head->next */
        pd->del_lock--;                 /* release lock level */
        while (pd->log_head->next) {
                if ((pd->log_head->usage_cnt <= 0) &&
                    (pd->log_head->next->usage_cnt <= 0)) {
                        ib = pd->log_head;
                        pd->log_head = pd->log_head->next;
                        kfree(ib);
                } else {
                        break;
                }
        }                               /* pd->log_head->next */

        spin_unlock_irqrestore(&card->hysdn_lock, flags);

        wake_up_interruptible(&(pd->rd_queue));         /* announce new entry */
}                               /* put_log_buffer */
@@ -625,6 +625,44 @@ static void mt7530_adjust_link(struct dsa_switch *ds, int port,
                 * all finished.
                 */
                mt7623_pad_clk_setup(ds);
        } else {
                u16 lcl_adv = 0, rmt_adv = 0;
                u8 flowctrl;
                u32 mcr = PMCR_USERP_LINK | PMCR_FORCE_MODE;

                switch (phydev->speed) {
                case SPEED_1000:
                        mcr |= PMCR_FORCE_SPEED_1000;
                        break;
                case SPEED_100:
                        mcr |= PMCR_FORCE_SPEED_100;
                        break;
                };

                if (phydev->link)
                        mcr |= PMCR_FORCE_LNK;

                if (phydev->duplex) {
                        mcr |= PMCR_FORCE_FDX;

                        if (phydev->pause)
                                rmt_adv = LPA_PAUSE_CAP;
                        if (phydev->asym_pause)
                                rmt_adv |= LPA_PAUSE_ASYM;

                        if (phydev->advertising & ADVERTISED_Pause)
                                lcl_adv |= ADVERTISE_PAUSE_CAP;
                        if (phydev->advertising & ADVERTISED_Asym_Pause)
                                lcl_adv |= ADVERTISE_PAUSE_ASYM;

                        flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);

                        if (flowctrl & FLOW_CTRL_TX)
                                mcr |= PMCR_TX_FC_EN;
                        if (flowctrl & FLOW_CTRL_RX)
                                mcr |= PMCR_RX_FC_EN;
                }
                mt7530_write(priv, MT7530_PMCR_P(port), mcr);
        }
}
@@ -151,6 +151,7 @@ enum mt7530_stp_state {
#define  PMCR_TX_FC_EN          BIT(5)
#define  PMCR_RX_FC_EN          BIT(4)
#define  PMCR_FORCE_SPEED_1000  BIT(3)
#define  PMCR_FORCE_SPEED_100   BIT(2)
#define  PMCR_FORCE_FDX         BIT(1)
#define  PMCR_FORCE_LNK         BIT(0)
#define  PMCR_COMMON_LINK       (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \
@@ -1785,9 +1785,9 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)

        xgene_enet_gpiod_get(pdata);

        if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {
                pdata->clk = devm_clk_get(&pdev->dev, NULL);
                if (IS_ERR(pdata->clk)) {
        pdata->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(pdata->clk)) {
                if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {
                        /* Abort if the clock is defined but couldn't be
                         * retrived. Always abort if the clock is missing on
                         * DT system as the driver can't cope with this case.
@@ -2368,6 +2368,7 @@ static int b44_init_one(struct ssb_device *sdev,
        bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);

        spin_lock_init(&bp->lock);
        u64_stats_init(&bp->hw_stats.syncp);

        bp->rx_pending = B44_DEF_RX_RING_PENDING;
        bp->tx_pending = B44_DEF_TX_RING_PENDING;
@@ -111,6 +111,7 @@ static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);
static void send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
@@ -651,6 +652,7 @@ static int ibmvnic_login(struct net_device *netdev)
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        unsigned long timeout = msecs_to_jiffies(30000);
        struct device *dev = &adapter->vdev->dev;
        int rc;

        do {
                if (adapter->renegotiate) {
@@ -664,6 +666,18 @@ static int ibmvnic_login(struct net_device *netdev)
                        dev_err(dev, "Capabilities query timeout\n");
                        return -1;
                }
                rc = init_sub_crqs(adapter);
                if (rc) {
                        dev_err(dev,
                                "Initialization of SCRQ's failed\n");
                        return -1;
                }
                rc = init_sub_crq_irqs(adapter);
                if (rc) {
                        dev_err(dev,
                                "Initialization of SCRQ's irqs failed\n");
                        return -1;
                }
        }

        reinit_completion(&adapter->init_done);
@@ -3004,7 +3018,6 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
                         *req_value,
                         (long int)be64_to_cpu(crq->request_capability_rsp.
                                               number), name);
                release_sub_crqs(adapter);
                *req_value = be64_to_cpu(crq->request_capability_rsp.number);
                ibmvnic_send_req_caps(adapter, 1);
                return;
@@ -1113,6 +1113,8 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
        if (!tx_ring->tx_bi)
                goto err;

        u64_stats_init(&tx_ring->syncp);

        /* round up to nearest 4K */
        tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
        /* add u32 for head writeback, align after this takes care of
@@ -2988,6 +2988,8 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
        if (!tx_ring->tx_buffer_info)
                goto err;

        u64_stats_init(&tx_ring->syncp);

        /* round up to nearest 4K */
        tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
        tx_ring->size = ALIGN(tx_ring->size, 4096);
@@ -3046,6 +3048,8 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
        if (!rx_ring->rx_buffer_info)
                goto err;

        u64_stats_init(&rx_ring->syncp);

        /* Round up to nearest 4K */
        rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
        rx_ring->size = ALIGN(rx_ring->size, 4096);
@@ -223,6 +223,7 @@ static void mlx4_en_get_wol(struct net_device *netdev,
                            struct ethtool_wolinfo *wol)
{
        struct mlx4_en_priv *priv = netdev_priv(netdev);
        struct mlx4_caps *caps = &priv->mdev->dev->caps;
        int err = 0;
        u64 config = 0;
        u64 mask;
@@ -235,24 +236,24 @@ static void mlx4_en_get_wol(struct net_device *netdev,
        mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
                MLX4_DEV_CAP_FLAG_WOL_PORT2;

        if (!(priv->mdev->dev->caps.flags & mask)) {
        if (!(caps->flags & mask)) {
                wol->supported = 0;
                wol->wolopts = 0;
                return;
        }

        if (caps->wol_port[priv->port])
                wol->supported = WAKE_MAGIC;
        else
                wol->supported = 0;

        err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
        if (err) {
                en_err(priv, "Failed to get WoL information\n");
                return;
        }

        if (config & MLX4_EN_WOL_MAGIC)
                wol->supported = WAKE_MAGIC;
        else
                wol->supported = 0;

        if (config & MLX4_EN_WOL_ENABLED)
        if ((config & MLX4_EN_WOL_ENABLED) && (config & MLX4_EN_WOL_MAGIC))
                wol->wolopts = WAKE_MAGIC;
        else
                wol->wolopts = 0;
@@ -574,16 +574,21 @@ static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
 * header, the HW adds it. To address that, we are subtracting the pseudo
 * header checksum from the checksum value provided by the HW.
 */
static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
                                struct iphdr *iph)
static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
                               struct iphdr *iph)
{
        __u16 length_for_csum = 0;
        __wsum csum_pseudo_header = 0;
        __u8 ipproto = iph->protocol;

        if (unlikely(ipproto == IPPROTO_SCTP))
                return -1;

        length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
        csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
                                                length_for_csum, iph->protocol, 0);
                                                length_for_csum, ipproto, 0);
        skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
        return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
@@ -594,17 +599,20 @@ static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
                               struct ipv6hdr *ipv6h)
{
        __u8 nexthdr = ipv6h->nexthdr;
        __wsum csum_pseudo_hdr = 0;

        if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT ||
                     ipv6h->nexthdr == IPPROTO_HOPOPTS))
        if (unlikely(nexthdr == IPPROTO_FRAGMENT ||
                     nexthdr == IPPROTO_HOPOPTS ||
                     nexthdr == IPPROTO_SCTP))
                return -1;
        hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
        hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(nexthdr));

        csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
                                       sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
        csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len);
        csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr));
        csum_pseudo_hdr = csum_add(csum_pseudo_hdr,
                                   (__force __wsum)htons(nexthdr));

        skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr);
        skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0));
@@ -627,11 +635,10 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
        }

        if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
                get_fixed_ipv4_csum(hw_checksum, skb, hdr);
                return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
#if IS_ENABLED(CONFIG_IPV6)
        else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
                if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr)))
                        return -1;
        if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
                return get_fixed_ipv6_csum(hw_checksum, skb, hdr);
#endif
        return 0;
}
@@ -159,8 +159,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
                [32] = "Loopback source checks support",
                [33] = "RoCEv2 support",
                [34] = "DMFS Sniffer support (UC & MC)",
                [35] = "QinQ VST mode support",
                [36] = "sl to vl mapping table change event support"
                [35] = "Diag counters per port",
                [36] = "QinQ VST mode support",
                [37] = "sl to vl mapping table change event support",
        };
        int i;

@@ -764,6 +765,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET      0x3e
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET           0x3f
#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET          0x40
#define QUERY_DEV_CAP_WOL_OFFSET                0x43
#define QUERY_DEV_CAP_FLAGS_OFFSET              0x44
#define QUERY_DEV_CAP_RSVD_UAR_OFFSET           0x48
#define QUERY_DEV_CAP_UAR_SZ_OFFSET             0x49
@@ -920,6 +922,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
        MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
        dev_cap->flags = flags | (u64)ext_flags << 32;
        MLX4_GET(field, outbox, QUERY_DEV_CAP_WOL_OFFSET);
        dev_cap->wol_port[1] = !!(field & 0x20);
        dev_cap->wol_port[2] = !!(field & 0x40);
        MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
        dev_cap->reserved_uars = field >> 4;
        MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
@@ -129,6 +129,7 @@ struct mlx4_dev_cap {
        u32 dmfs_high_rate_qpn_range;
        struct mlx4_rate_limit_caps rl_caps;
        struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
        bool wol_port[MLX4_MAX_PORTS + 1];
};

struct mlx4_func_cap {
@@ -424,6 +424,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
        dev->caps.max_gso_sz         = dev_cap->max_gso_sz;
        dev->caps.max_rss_tbl_sz     = dev_cap->max_rss_tbl_sz;
        dev->caps.wol_port[1]        = dev_cap->wol_port[1];
        dev->caps.wol_port[2]        = dev_cap->wol_port[2];

        /* Save uar page shift */
        if (!mlx4_is_slave(dev)) {
@@ -626,8 +626,8 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,

        bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
                                                orig_dev);
        if (WARN_ON(!bridge_port))
                return -EINVAL;
        if (!bridge_port)
                return 0;

        err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
                                                   MLXSW_SP_FLOOD_TYPE_UC,
@@ -711,8 +711,8 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,

        bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
                                                orig_dev);
        if (WARN_ON(!bridge_port))
                return -EINVAL;
        if (!bridge_port)
                return 0;

        if (!bridge_port->bridge_device->multicast_enabled)
                return 0;
@@ -1283,15 +1283,15 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
                return 0;

        bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
        if (WARN_ON(!bridge_port))
                return -EINVAL;
        if (!bridge_port)
                return 0;

        bridge_device = bridge_port->bridge_device;
        mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
                                                               bridge_device,
                                                               mdb->vid);
        if (WARN_ON(!mlxsw_sp_port_vlan))
                return -EINVAL;
        if (!mlxsw_sp_port_vlan)
                return 0;

        fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

@@ -1407,15 +1407,15 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
        int err = 0;

        bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
        if (WARN_ON(!bridge_port))
                return -EINVAL;
        if (!bridge_port)
                return 0;

        bridge_device = bridge_port->bridge_device;
        mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
                                                               bridge_device,
                                                               mdb->vid);
        if (WARN_ON(!mlxsw_sp_port_vlan))
                return -EINVAL;
        if (!mlxsw_sp_port_vlan)
                return 0;

        fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

@@ -1974,6 +1974,17 @@ static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)

}

static void mlxsw_sp_mids_fini(struct mlxsw_sp *mlxsw_sp)
{
        struct mlxsw_sp_mid *mid, *tmp;

        list_for_each_entry_safe(mid, tmp, &mlxsw_sp->bridge->mids_list, list) {
                list_del(&mid->list);
                clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
                kfree(mid);
        }
}

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
        struct mlxsw_sp_bridge *bridge;
@@ -1996,7 +2007,7 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
        mlxsw_sp_fdb_fini(mlxsw_sp);
        WARN_ON(!list_empty(&mlxsw_sp->bridge->mids_list));
        mlxsw_sp_mids_fini(mlxsw_sp);
        WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
        kfree(mlxsw_sp->bridge);
}
@@ -513,6 +513,7 @@ nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
        tx_ring->idx = idx;
        tx_ring->r_vec = r_vec;
        tx_ring->is_xdp = is_xdp;
        u64_stats_init(&tx_ring->r_vec->tx_sync);

        tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
        tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
@@ -532,6 +533,7 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,

        rx_ring->idx = idx;
        rx_ring->r_vec = r_vec;
        u64_stats_init(&rx_ring->r_vec->rx_sync);

        rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
        rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
@@ -253,7 +253,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
        p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
        p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
        if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
        if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
                goto err;

        return 0;
@ -31,9 +31,18 @@
|
||||
|
||||
#include "cpts.h"
|
||||
|
||||
#define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
|
||||
|
||||
struct cpts_skb_cb_data {
|
||||
unsigned long tmo;
|
||||
};
|
||||
|
||||
#define cpts_read32(c, r) readl_relaxed(&c->reg->r)
|
||||
#define cpts_write32(c, v, r) writel_relaxed(v, &c->reg->r)
|
||||
|
||||
static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
|
||||
u16 ts_seqid, u8 ts_msgtype);
|
||||
|
||||
static int event_expired(struct cpts_event *event)
|
||||
{
|
||||
return time_after(jiffies, event->tmo);
|
||||
@ -77,6 +86,47 @@ static int cpts_purge_events(struct cpts *cpts)
|
||||
return removed ? 0 : -1;
|
||||
}
|
||||
|
||||
static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
|
||||
{
|
||||
struct sk_buff *skb, *tmp;
|
||||
u16 seqid;
|
||||
u8 mtype;
|
||||
bool found = false;
|
||||
|
||||
mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
|
||||
seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
|
||||
|
||||
/* no need to grab txq.lock as access is always done under cpts->lock */
|
||||
skb_queue_walk_safe(&cpts->txq, skb, tmp) {
|
||||
struct skb_shared_hwtstamps ssh;
|
||||
unsigned int class = ptp_classify_raw(skb);
|
||||
struct cpts_skb_cb_data *skb_cb =
|
||||
(struct cpts_skb_cb_data *)skb->cb;
|
||||
|
||||
if (cpts_match(skb, class, seqid, mtype)) {
|
||||
u64 ns = timecounter_cyc2time(&cpts->tc, event->low);
|
||||
|
||||
memset(&ssh, 0, sizeof(ssh));
|
||||
ssh.hwtstamp = ns_to_ktime(ns);
|
||||
skb_tstamp_tx(skb, &ssh);
|
||||
found = true;
|
||||
__skb_unlink(skb, &cpts->txq);
|
||||
dev_consume_skb_any(skb);
|
||||
dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n",
|
||||
mtype, seqid);
|
||||
} else if (time_after(jiffies, skb_cb->tmo)) {
|
||||
/* timeout any expired skbs over 1s */
|
||||
dev_dbg(cpts->dev,
|
||||
"expiring tx timestamp mtype %u seqid %04x\n",
|
||||
mtype, seqid);
|
||||
__skb_unlink(skb, &cpts->txq);
|
||||
dev_consume_skb_any(skb);
|
||||
}
|
||||
}
|
||||
|
||||
return found;
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns zero if matching event type was found.
|
||||
*/
|
||||
@ -101,9 +151,15 @@ static int cpts_fifo_read(struct cpts *cpts, int match)
|
||||
event->low = lo;
|
||||
type = event_type(event);
|
||||
switch (type) {
|
||||
case CPTS_EV_TX:
|
||||
if (cpts_match_tx_ts(cpts, event)) {
|
||||
/* if the new event matches an existing skb,
|
||||
* then don't queue it
|
||||
*/
|
||||
break;
|
||||
}
|
||||
case CPTS_EV_PUSH:
|
||||
case CPTS_EV_RX:
|
||||
case CPTS_EV_TX:
|
||||
list_del_init(&event->list);
|
||||
list_add_tail(&event->list, &cpts->events);
|
||||
break;
|
||||
@ -224,6 +280,24 @@ static int cpts_ptp_enable(struct ptp_clock_info *ptp,
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static long cpts_overflow_check(struct ptp_clock_info *ptp)
|
||||
{
|
||||
struct cpts *cpts = container_of(ptp, struct cpts, info);
|
||||
unsigned long delay = cpts->ov_check_period;
|
||||
struct timespec64 ts;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&cpts->lock, flags);
|
||||
ts = ns_to_timespec64(timecounter_read(&cpts->tc));
|
||||
|
||||
if (!skb_queue_empty(&cpts->txq))
|
||||
delay = CPTS_SKB_TX_WORK_TIMEOUT;
|
||||
spin_unlock_irqrestore(&cpts->lock, flags);
|
||||
|
||||
pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
|
||||
return (long)delay;
|
||||
}
|
||||
|
||||
static struct ptp_clock_info cpts_info = {
|
||||
.owner = THIS_MODULE,
|
||||
.name = "CTPS timer",
|
||||
@ -236,18 +310,9 @@ static struct ptp_clock_info cpts_info = {
|
||||
.gettime64 = cpts_ptp_gettime,
|
||||
.settime64 = cpts_ptp_settime,
|
||||
.enable = cpts_ptp_enable,
|
||||
.do_aux_work = cpts_overflow_check,
|
||||
};
|
||||
|
||||
static void cpts_overflow_check(struct work_struct *work)
|
||||
{
|
||||
struct timespec64 ts;
|
||||
struct cpts *cpts = container_of(work, struct cpts, overflow_work.work);
|
||||
|
||||
cpts_ptp_gettime(&cpts->info, &ts);
|
||||
pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
|
||||
schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period);
|
||||
}
|
||||
|
||||
static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
|
||||
u16 ts_seqid, u8 ts_msgtype)
|
||||
{
|
||||
@ -299,7 +364,7 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
|
||||
return 0;
|
||||
|
||||
spin_lock_irqsave(&cpts->lock, flags);
|
||||
cpts_fifo_read(cpts, CPTS_EV_PUSH);
|
||||
cpts_fifo_read(cpts, -1);
|
||||
list_for_each_safe(this, next, &cpts->events) {
|
||||
event = list_entry(this, struct cpts_event, list);
|
||||
if (event_expired(event)) {
|
||||
@ -317,6 +382,19 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (ev_type == CPTS_EV_TX && !ns) {
|
||||
struct cpts_skb_cb_data *skb_cb =
|
||||
(struct cpts_skb_cb_data *)skb->cb;
|
||||
/* Not found, add frame to queue for processing later.
|
||||
* The periodic FIFO check will handle this.
|
||||
*/
|
||||
skb_get(skb);
|
||||
/* get the timestamp for timeouts */
|
||||
skb_cb->tmo = jiffies + msecs_to_jiffies(100);
|
||||
__skb_queue_tail(&cpts->txq, skb);
|
||||
ptp_schedule_worker(cpts->clock, 0);
|
||||
}
|
||||
spin_unlock_irqrestore(&cpts->lock, flags);
|
||||
|
||||
return ns;
|
||||
@ -358,6 +436,7 @@ int cpts_register(struct cpts *cpts)
|
||||
{
|
||||
int err, i;
|
||||
|
||||
skb_queue_head_init(&cpts->txq);
|
||||
INIT_LIST_HEAD(&cpts->events);
|
||||
INIT_LIST_HEAD(&cpts->pool);
|
||||
for (i = 0; i < CPTS_MAX_EVENTS; i++)
|
||||
@ -378,7 +457,7 @@ int cpts_register(struct cpts *cpts)
|
||||
}
|
||||
cpts->phc_index = ptp_clock_index(cpts->clock);
|
||||
|
||||
schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period);
|
||||
ptp_schedule_worker(cpts->clock, cpts->ov_check_period);
|
||||
return 0;
|
||||
|
||||
err_ptp:
|
||||
@ -392,14 +471,15 @@ void cpts_unregister(struct cpts *cpts)
|
||||
if (WARN_ON(!cpts->clock))
|
||||
return;
|
||||
|
||||
cancel_delayed_work_sync(&cpts->overflow_work);
|
||||
|
||||
ptp_clock_unregister(cpts->clock);
|
||||
cpts->clock = NULL;
|
||||
|
||||
cpts_write32(cpts, 0, int_enable);
|
||||
cpts_write32(cpts, 0, control);
|
||||
|
||||
/* Drop all packet */
|
||||
skb_queue_purge(&cpts->txq);
|
||||
|
||||
clk_disable(cpts->refclk);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cpts_unregister);
|
||||
@ -476,7 +556,6 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
|
||||
cpts->dev = dev;
|
||||
cpts->reg = (struct cpsw_cpts __iomem *)regs;
|
||||
spin_lock_init(&cpts->lock);
|
||||
INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check);
|
||||
|
||||
ret = cpts_of_parse(cpts, node);
|
||||
if (ret)
|
||||
|
@ -119,13 +119,13 @@ struct cpts {
|
||||
u32 cc_mult; /* for the nominal frequency */
|
||||
struct cyclecounter cc;
|
||||
struct timecounter tc;
|
||||
struct delayed_work overflow_work;
|
||||
int phc_index;
|
||||
struct clk *refclk;
|
||||
struct list_head events;
|
||||
struct list_head pool;
|
||||
struct cpts_event pool_data[CPTS_MAX_EVENTS];
|
||||
unsigned long ov_check_period;
|
||||
struct sk_buff_head txq;
|
||||
};
|
||||
|
||||
void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
|
||||
|
@ -364,7 +364,7 @@ static int gtp_dev_init(struct net_device *dev)
|
||||
|
||||
gtp->dev = dev;
|
||||
|
||||
dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
|
||||
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
|
||||
if (!dev->tstats)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -765,7 +765,8 @@ struct netvsc_device {
|
||||
u32 max_chn;
|
||||
u32 num_chn;
|
||||
|
||||
refcount_t sc_offered;
|
||||
atomic_t open_chn;
|
||||
wait_queue_head_t subchan_open;
|
||||
|
||||
struct rndis_device *extension;
|
||||
|
||||
|
@ -78,6 +78,7 @@ static struct netvsc_device *alloc_net_device(void)
|
||||
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
|
||||
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
|
||||
init_completion(&net_device->channel_init_wait);
|
||||
init_waitqueue_head(&net_device->subchan_open);
|
||||
|
||||
return net_device;
|
||||
}
|
||||
@ -1302,6 +1303,8 @@ int netvsc_device_add(struct hv_device *device,
|
||||
struct netvsc_channel *nvchan = &net_device->chan_table[i];
|
||||
|
||||
nvchan->channel = device->channel;
|
||||
u64_stats_init(&nvchan->tx_stats.syncp);
|
||||
u64_stats_init(&nvchan->rx_stats.syncp);
|
||||
}
|
||||
|
||||
/* Enable NAPI handler before init callbacks */
|
||||
|
@ -1048,8 +1048,8 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
|
||||
else
|
||||
netif_napi_del(&nvchan->napi);
|
||||
|
||||
if (refcount_dec_and_test(&nvscdev->sc_offered))
|
||||
complete(&nvscdev->channel_init_wait);
|
||||
atomic_inc(&nvscdev->open_chn);
|
||||
wake_up(&nvscdev->subchan_open);
|
||||
}
|
||||
|
||||
int rndis_filter_device_add(struct hv_device *dev,
|
||||
@ -1090,8 +1090,6 @@ int rndis_filter_device_add(struct hv_device *dev,
|
||||
net_device->max_chn = 1;
|
||||
net_device->num_chn = 1;
|
||||
|
||||
refcount_set(&net_device->sc_offered, 0);
|
||||
|
||||
net_device->extension = rndis_device;
|
||||
rndis_device->ndev = net;
|
||||
|
||||
@ -1221,11 +1219,11 @@ int rndis_filter_device_add(struct hv_device *dev,
|
||||
rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i,
|
||||
net_device->num_chn);
|
||||
|
||||
atomic_set(&net_device->open_chn, 1);
|
||||
num_rss_qs = net_device->num_chn - 1;
|
||||
if (num_rss_qs == 0)
|
||||
return 0;
|
||||
|
||||
refcount_set(&net_device->sc_offered, num_rss_qs);
|
||||
vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
|
||||
|
||||
init_packet = &net_device->channel_init_pkt;
|
||||
@ -1242,15 +1240,19 @@ int rndis_filter_device_add(struct hv_device *dev,
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
wait_for_completion(&net_device->channel_init_wait);
|
||||
if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
|
||||
ret = -ENODEV;
|
||||
goto out;
|
||||
}
|
||||
wait_for_completion(&net_device->channel_init_wait);
|
||||
|
||||
net_device->num_chn = 1 +
|
||||
init_packet->msg.v5_msg.subchn_comp.num_subchannels;
|
||||
|
||||
/* wait for all sub channels to open */
|
||||
wait_event(net_device->subchan_open,
|
||||
atomic_read(&net_device->open_chn) == net_device->num_chn);
|
||||
|
||||
/* ignore failues from setting rss parameters, still have channels */
|
||||
rndis_filter_set_rss_param(rndis_device, netvsc_hash_key,
|
||||
net_device->num_chn);
|
||||
|
@ -192,7 +192,7 @@ static int ipvlan_init(struct net_device *dev)
|
||||
|
||||
netdev_lockdep_set_classes(dev);
|
||||
|
||||
ipvlan->pcpu_stats = alloc_percpu(struct ipvl_pcpu_stats);
|
||||
ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats);
|
||||
if (!ipvlan->pcpu_stats)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -1915,21 +1915,23 @@ static void __ppp_channel_push(struct channel *pch)
|
||||
spin_unlock(&pch->downl);
|
||||
/* see if there is anything from the attached unit to be sent */
|
||||
if (skb_queue_empty(&pch->file.xq)) {
|
||||
read_lock(&pch->upl);
|
||||
ppp = pch->ppp;
|
||||
if (ppp)
|
||||
ppp_xmit_process(ppp);
|
||||
read_unlock(&pch->upl);
|
||||
__ppp_xmit_process(ppp);
|
||||
}
|
||||
}
|
||||
|
||||
static void ppp_channel_push(struct channel *pch)
|
||||
{
|
||||
local_bh_disable();
|
||||
|
||||
__ppp_channel_push(pch);
|
||||
|
||||
local_bh_enable();
|
||||
read_lock_bh(&pch->upl);
|
||||
if (pch->ppp) {
|
||||
(*this_cpu_ptr(pch->ppp->xmit_recursion))++;
|
||||
__ppp_channel_push(pch);
|
||||
(*this_cpu_ptr(pch->ppp->xmit_recursion))--;
|
||||
} else {
|
||||
__ppp_channel_push(pch);
|
||||
}
|
||||
read_unlock_bh(&pch->upl);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -209,6 +209,7 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value,
|
||||
int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
|
||||
struct asix_rx_fixup_info *rx);
|
||||
int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb);
|
||||
void asix_rx_fixup_common_free(struct asix_common_private *dp);
|
||||
|
||||
struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
|
||||
gfp_t flags);
|
||||
|
@ -75,6 +75,27 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
|
||||
value, index, data, size);
|
||||
}
|
||||
|
||||
static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx)
|
||||
{
|
||||
/* Reset the variables that have a lifetime outside of
|
||||
* asix_rx_fixup_internal() so that future processing starts from a
|
||||
* known set of initial conditions.
|
||||
*/
|
||||
|
||||
if (rx->ax_skb) {
|
||||
/* Discard any incomplete Ethernet frame in the netdev buffer */
|
||||
kfree_skb(rx->ax_skb);
|
||||
rx->ax_skb = NULL;
|
||||
}
|
||||
|
||||
/* Assume the Data header 32-bit word is at the start of the current
|
||||
* or next URB socket buffer so reset all the state variables.
|
||||
*/
|
||||
rx->remaining = 0;
|
||||
rx->split_head = false;
|
||||
rx->header = 0;
|
||||
}
|
||||
|
||||
int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
|
||||
struct asix_rx_fixup_info *rx)
|
||||
{
|
||||
@ -99,15 +120,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
|
||||
if (size != ((~rx->header >> 16) & 0x7ff)) {
|
||||
netdev_err(dev->net, "asix_rx_fixup() Data Header synchronisation was lost, remaining %d\n",
|
||||
rx->remaining);
|
||||
if (rx->ax_skb) {
|
||||
kfree_skb(rx->ax_skb);
|
||||
rx->ax_skb = NULL;
|
||||
/* Discard the incomplete netdev Ethernet frame
|
||||
* and assume the Data header is at the start of
|
||||
* the current URB socket buffer.
|
||||
*/
|
||||
}
|
||||
rx->remaining = 0;
|
||||
reset_asix_rx_fixup_info(rx);
|
||||
}
|
||||
}
|
||||
|
||||
@ -139,11 +152,13 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
|
||||
if (size != ((~rx->header >> 16) & 0x7ff)) {
|
||||
netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n",
|
||||
rx->header, offset);
|
||||
reset_asix_rx_fixup_info(rx);
|
||||
return 0;
|
||||
}
|
||||
if (size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) {
|
||||
netdev_dbg(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
|
||||
size);
|
||||
reset_asix_rx_fixup_info(rx);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -168,8 +183,10 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
|
||||
if (rx->ax_skb) {
|
||||
skb_put_data(rx->ax_skb, skb->data + offset,
|
||||
copy_length);
|
||||
if (!rx->remaining)
|
||||
if (!rx->remaining) {
|
||||
usbnet_skb_return(dev, rx->ax_skb);
|
||||
rx->ax_skb = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
offset += (copy_length + 1) & 0xfffe;
|
||||
@ -178,6 +195,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
|
||||
if (skb->len != offset) {
|
||||
netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n",
|
||||
skb->len, offset);
|
||||
reset_asix_rx_fixup_info(rx);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -192,6 +210,21 @@ int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb)
|
||||
return asix_rx_fixup_internal(dev, skb, rx);
|
||||
}
|
||||
|
||||
void asix_rx_fixup_common_free(struct asix_common_private *dp)
|
||||
{
|
||||
struct asix_rx_fixup_info *rx;
|
||||
|
||||
if (!dp)
|
||||
return;
|
||||
|
||||
rx = &dp->rx_fixup_info;
|
||||
|
||||
if (rx->ax_skb) {
|
||||
kfree_skb(rx->ax_skb);
|
||||
rx->ax_skb = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
|
||||
gfp_t flags)
|
||||
{
|
||||
|
@ -764,6 +764,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
|
||||
|
||||
static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
|
||||
{
|
||||
asix_rx_fixup_common_free(dev->driver_priv);
|
||||
kfree(dev->driver_priv);
|
||||
}
|
||||
|
||||
|
@ -2367,9 +2367,6 @@ static int lan78xx_reset(struct lan78xx_net *dev)
|
||||
/* Init LTM */
|
||||
lan78xx_init_ltm(dev);
|
||||
|
||||
dev->net->hard_header_len += TX_OVERHEAD;
|
||||
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
|
||||
|
||||
if (dev->udev->speed == USB_SPEED_SUPER) {
|
||||
buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
|
||||
dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
|
||||
@ -2855,16 +2852,19 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
|
||||
return ret;
|
||||
}
|
||||
|
||||
dev->net->hard_header_len += TX_OVERHEAD;
|
||||
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
|
||||
|
||||
/* Init all registers */
|
||||
ret = lan78xx_reset(dev);
|
||||
|
||||
lan78xx_mdio_init(dev);
|
||||
ret = lan78xx_mdio_init(dev);
|
||||
|
||||
dev->net->flags |= IFF_MULTICAST;
|
||||
|
||||
pdata->wol = WAKE_MAGIC;
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
|
||||
@ -3525,11 +3525,11 @@ static int lan78xx_probe(struct usb_interface *intf,
|
||||
udev = interface_to_usbdev(intf);
|
||||
udev = usb_get_dev(udev);
|
||||
|
||||
ret = -ENOMEM;
|
||||
netdev = alloc_etherdev(sizeof(struct lan78xx_net));
|
||||
if (!netdev) {
|
||||
dev_err(&intf->dev, "Error: OOM\n");
|
||||
goto out1;
|
||||
dev_err(&intf->dev, "Error: OOM\n");
|
||||
ret = -ENOMEM;
|
||||
goto out1;
|
||||
}
|
||||
|
||||
/* netdev_printk() needs this */
|
||||
@ -3610,7 +3610,7 @@ static int lan78xx_probe(struct usb_interface *intf,
|
||||
ret = register_netdev(netdev);
|
||||
if (ret != 0) {
|
||||
netif_err(dev, probe, netdev, "couldn't register the device\n");
|
||||
goto out2;
|
||||
goto out3;
|
||||
}
|
||||
|
||||
usb_set_intfdata(intf, dev);
|
||||
|
@ -1175,6 +1175,7 @@ static const struct usb_device_id products[] = {
|
||||
{QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
|
||||
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
|
||||
{QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
|
||||
{QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
|
||||
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
|
||||
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
|
||||
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
|
||||
@ -1340,10 +1341,14 @@ static int qmi_wwan_probe(struct usb_interface *intf,
|
||||
static void qmi_wwan_disconnect(struct usb_interface *intf)
|
||||
{
|
||||
struct usbnet *dev = usb_get_intfdata(intf);
|
||||
struct qmi_wwan_state *info = (void *)&dev->data;
|
||||
struct qmi_wwan_state *info;
|
||||
struct list_head *iter;
|
||||
struct net_device *ldev;
|
||||
|
||||
/* called twice if separate control and data intf */
|
||||
if (!dev)
|
||||
return;
|
||||
info = (void *)&dev->data;
|
||||
if (info->flags & QMI_WWAN_FLAG_MUX) {
|
||||
if (!rtnl_trylock()) {
|
||||
restart_syscall();
|
||||
|
@ -623,6 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
|
||||
|
||||
out:
|
||||
skb_gro_remcsum_cleanup(skb, &grc);
|
||||
skb->remcsum_offload = 0;
|
||||
NAPI_GRO_CB(skb)->flush |= flush;
|
||||
|
||||
return pp;
|
||||
|
@ -28,6 +28,7 @@
|
||||
#include <linux/slab.h>
|
||||
#include <linux/syscalls.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <uapi/linux/sched/types.h>
|
||||
|
||||
#include "ptp_private.h"
|
||||
|
||||
@ -184,6 +185,19 @@ static void delete_ptp_clock(struct posix_clock *pc)
|
||||
kfree(ptp);
|
||||
}
|
||||
|
||||
static void ptp_aux_kworker(struct kthread_work *work)
|
||||
{
|
||||
struct ptp_clock *ptp = container_of(work, struct ptp_clock,
|
||||
aux_work.work);
|
||||
struct ptp_clock_info *info = ptp->info;
|
||||
long delay;
|
||||
|
||||
delay = info->do_aux_work(info);
|
||||
|
||||
if (delay >= 0)
|
||||
kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay);
|
||||
}
|
||||
|
||||
/* public interface */
|
||||
|
||||
struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
|
||||
@ -217,6 +231,20 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
|
||||
mutex_init(&ptp->pincfg_mux);
|
||||
init_waitqueue_head(&ptp->tsev_wq);
|
||||
|
||||
if (ptp->info->do_aux_work) {
|
||||
char *worker_name = kasprintf(GFP_KERNEL, "ptp%d", ptp->index);
|
||||
|
||||
kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
|
||||
ptp->kworker = kthread_create_worker(0, worker_name ?
|
||||
worker_name : info->name);
|
||||
kfree(worker_name);
|
||||
if (IS_ERR(ptp->kworker)) {
|
||||
err = PTR_ERR(ptp->kworker);
|
||||
pr_err("failed to create ptp aux_worker %d\n", err);
|
||||
goto kworker_err;
|
||||
}
|
||||
}
|
||||
|
||||
err = ptp_populate_pin_groups(ptp);
|
||||
if (err)
|
||||
goto no_pin_groups;
|
||||
@ -259,6 +287,9 @@ no_pps:
|
||||
no_device:
|
||||
ptp_cleanup_pin_groups(ptp);
|
||||
no_pin_groups:
|
||||
if (ptp->kworker)
|
||||
kthread_destroy_worker(ptp->kworker);
|
||||
kworker_err:
|
||||
mutex_destroy(&ptp->tsevq_mux);
|
||||
mutex_destroy(&ptp->pincfg_mux);
|
||||
ida_simple_remove(&ptp_clocks_map, index);
|
||||
@ -274,6 +305,11 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
|
||||
ptp->defunct = 1;
|
||||
wake_up_interruptible(&ptp->tsev_wq);
|
||||
|
||||
if (ptp->kworker) {
|
||||
kthread_cancel_delayed_work_sync(&ptp->aux_work);
|
||||
kthread_destroy_worker(ptp->kworker);
|
||||
}
|
||||
|
||||
/* Release the clock's resources. */
|
||||
if (ptp->pps_source)
|
||||
pps_unregister_source(ptp->pps_source);
|
||||
@ -339,6 +375,12 @@ int ptp_find_pin(struct ptp_clock *ptp,
|
||||
}
|
||||
EXPORT_SYMBOL(ptp_find_pin);
|
||||
|
||||
int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
|
||||
{
|
||||
return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay);
|
||||
}
|
||||
EXPORT_SYMBOL(ptp_schedule_worker);
|
||||
|
||||
/* module operations */
|
||||
|
||||
static void __exit ptp_exit(void)
|
||||
|
@ -22,6 +22,7 @@
|
||||
|
||||
#include <linux/cdev.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/posix-clock.h>
|
||||
#include <linux/ptp_clock.h>
|
||||
@ -56,6 +57,8 @@ struct ptp_clock {
|
||||
struct attribute_group pin_attr_group;
|
||||
/* 1st entry is a pointer to the real group, 2nd is NULL terminator */
|
||||
const struct attribute_group *pin_attr_groups[2];
|
||||
struct kthread_worker *kworker;
|
||||
struct kthread_delayed_work aux_work;
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -2512,7 +2512,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
|
||||
struct rtable *rt = (struct rtable *) dst;
|
||||
__be32 *pkey = &ip_hdr(skb)->daddr;
|
||||
|
||||
if (rt->rt_gateway)
|
||||
if (rt && rt->rt_gateway)
|
||||
pkey = &rt->rt_gateway;
|
||||
|
||||
/* IPv4 */
|
||||
@ -2523,7 +2523,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
|
||||
struct rt6_info *rt = (struct rt6_info *) dst;
|
||||
struct in6_addr *pkey = &ipv6_hdr(skb)->daddr;
|
||||
|
||||
if (!ipv6_addr_any(&rt->rt6i_gateway))
|
||||
if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
|
||||
pkey = &rt->rt6i_gateway;
|
||||
|
||||
/* IPv6 */
|
||||
|
@ -620,6 +620,7 @@ struct mlx4_caps {
|
||||
u32 dmfs_high_rate_qpn_base;
|
||||
u32 dmfs_high_rate_qpn_range;
|
||||
u32 vf_caps;
|
||||
bool wol_port[MLX4_MAX_PORTS + 1];
|
||||
struct mlx4_rate_limit_caps rl_caps;
|
||||
};
|
||||
|
||||
|
@ -99,6 +99,11 @@ struct system_device_crosststamp;
|
||||
* parameter func: the desired function to use.
|
||||
* parameter chan: the function channel index to use.
|
||||
*
|
||||
* @do_work: Request driver to perform auxiliary (periodic) operations
|
||||
* Driver should return delay of the next auxiliary work scheduling
|
||||
* time (>=0) or negative value in case further scheduling
|
||||
* is not required.
|
||||
*
|
||||
* Drivers should embed their ptp_clock_info within a private
|
||||
* structure, obtaining a reference to it using container_of().
|
||||
*
|
||||
@ -126,6 +131,7 @@ struct ptp_clock_info {
|
||||
struct ptp_clock_request *request, int on);
|
||||
int (*verify)(struct ptp_clock_info *ptp, unsigned int pin,
|
||||
enum ptp_pin_function func, unsigned int chan);
|
||||
long (*do_aux_work)(struct ptp_clock_info *ptp);
|
||||
};
|
||||
|
||||
struct ptp_clock;
|
||||
@ -211,6 +217,16 @@ extern int ptp_clock_index(struct ptp_clock *ptp);
|
||||
int ptp_find_pin(struct ptp_clock *ptp,
|
||||
enum ptp_pin_function func, unsigned int chan);
|
||||
|
||||
/**
|
||||
* ptp_schedule_worker() - schedule ptp auxiliary work
|
||||
*
|
||||
* @ptp: The clock obtained from ptp_clock_register().
|
||||
* @delay: number of jiffies to wait before queuing
|
||||
* See kthread_queue_delayed_work() for more info.
|
||||
*/
|
||||
|
||||
int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay);
|
||||
|
||||
#else
|
||||
static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
|
||||
struct device *parent)
|
||||
@ -225,6 +241,10 @@ static inline int ptp_clock_index(struct ptp_clock *ptp)
|
||||
static inline int ptp_find_pin(struct ptp_clock *ptp,
|
||||
enum ptp_pin_function func, unsigned int chan)
|
||||
{ return -1; }
|
||||
static inline int ptp_schedule_worker(struct ptp_clock *ptp,
|
||||
unsigned long delay)
|
||||
{ return -EOPNOTSUPP; }
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
@ -1916,6 +1916,16 @@ extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
|
||||
u64 xmit_time);
|
||||
extern void tcp_rack_reo_timeout(struct sock *sk);
|
||||
|
||||
/* At how many usecs into the future should the RTO fire? */
|
||||
static inline s64 tcp_rto_delta_us(const struct sock *sk)
|
||||
{
|
||||
const struct sk_buff *skb = tcp_write_queue_head(sk);
|
||||
u32 rto = inet_csk(sk)->icsk_rto;
|
||||
u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto);
|
||||
|
||||
return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
|
||||
}
|
||||
|
||||
/*
|
||||
* Save and compile IPv4 options, return a pointer to it
|
||||
*/
|
||||
|
@ -1549,9 +1549,41 @@ batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
|
||||
return found;
|
||||
}
|
||||
|
||||
/**
|
||||
* batadv_tt_global_sync_flags - update TT sync flags
|
||||
* @tt_global: the TT global entry to update sync flags in
|
||||
*
|
||||
* Updates the sync flag bits in the tt_global flag attribute with a logical
|
||||
* OR of all sync flags from any of its TT orig entries.
|
||||
*/
|
||||
static void
|
||||
batadv_tt_global_sync_flags(struct batadv_tt_global_entry *tt_global)
|
||||
{
|
||||
struct batadv_tt_orig_list_entry *orig_entry;
|
||||
const struct hlist_head *head;
|
||||
u16 flags = BATADV_NO_FLAGS;
|
||||
|
||||
rcu_read_lock();
|
||||
head = &tt_global->orig_list;
|
||||
hlist_for_each_entry_rcu(orig_entry, head, list)
|
||||
flags |= orig_entry->flags;
|
||||
rcu_read_unlock();
|
||||
|
||||
flags |= tt_global->common.flags & (~BATADV_TT_SYNC_MASK);
|
||||
tt_global->common.flags = flags;
|
||||
}
|
||||
|
||||
/**
|
||||
* batadv_tt_global_orig_entry_add - add or update a TT orig entry
|
||||
* @tt_global: the TT global entry to add an orig entry in
|
||||
* @orig_node: the originator to add an orig entry for
|
||||
* @ttvn: translation table version number of this changeset
|
||||
* @flags: TT sync flags
|
||||
*/
|
||||
static void
|
||||
batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
|
||||
struct batadv_orig_node *orig_node, int ttvn)
|
||||
struct batadv_orig_node *orig_node, int ttvn,
|
||||
u8 flags)
|
||||
{
|
||||
struct batadv_tt_orig_list_entry *orig_entry;
|
||||
|
||||
@ -1561,7 +1593,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
|
||||
* was added during a "temporary client detection"
|
||||
*/
|
||||
orig_entry->ttvn = ttvn;
|
||||
goto out;
|
||||
orig_entry->flags = flags;
|
||||
goto sync_flags;
|
||||
}
|
||||
|
||||
orig_entry = kmem_cache_zalloc(batadv_tt_orig_cache, GFP_ATOMIC);
|
||||
@ -1573,6 +1606,7 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
|
||||
batadv_tt_global_size_inc(orig_node, tt_global->common.vid);
|
||||
orig_entry->orig_node = orig_node;
|
||||
orig_entry->ttvn = ttvn;
|
||||
orig_entry->flags = flags;
|
||||
kref_init(&orig_entry->refcount);
|
||||
|
||||
spin_lock_bh(&tt_global->list_lock);
|
||||
@ -1582,6 +1616,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
|
||||
spin_unlock_bh(&tt_global->list_lock);
|
||||
atomic_inc(&tt_global->orig_list_count);
|
||||
|
||||
sync_flags:
|
||||
batadv_tt_global_sync_flags(tt_global);
|
||||
out:
|
||||
if (orig_entry)
|
||||
batadv_tt_orig_list_entry_put(orig_entry);
|
||||
@ -1703,10 +1739,10 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
|
||||
}
|
||||
|
||||
/* the change can carry possible "attribute" flags like the
|
||||
* TT_CLIENT_WIFI, therefore they have to be copied in the
|
||||
* TT_CLIENT_TEMP, therefore they have to be copied in the
|
||||
* client entry
|
||||
*/
|
||||
common->flags |= flags;
|
||||
common->flags |= flags & (~BATADV_TT_SYNC_MASK);
|
||||
|
||||
/* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
|
||||
* one originator left in the list and we previously received a
|
||||
@ -1723,7 +1759,8 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
|
||||
}
|
||||
add_orig_entry:
|
||||
/* add the new orig_entry (if needed) or update it */
|
||||
batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn);
|
||||
batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn,
|
||||
flags & BATADV_TT_SYNC_MASK);
|
||||
|
||||
batadv_dbg(BATADV_DBG_TT, bat_priv,
|
||||
"Creating new global tt entry: %pM (vid: %d, via %pM)\n",
|
||||
@ -1946,6 +1983,7 @@ batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
|
||||
struct batadv_tt_orig_list_entry *orig,
|
||||
bool best)
|
||||
{
|
||||
u16 flags = (common->flags & (~BATADV_TT_SYNC_MASK)) | orig->flags;
|
||||
void *hdr;
|
||||
struct batadv_orig_node_vlan *vlan;
|
||||
u8 last_ttvn;
|
||||
@ -1975,7 +2013,7 @@ batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
|
||||
nla_put_u8(msg, BATADV_ATTR_TT_LAST_TTVN, last_ttvn) ||
|
||||
nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) ||
|
||||
nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) ||
|
||||
nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, common->flags))
|
||||
nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, flags))
|
||||
goto nla_put_failure;
|
||||
|
||||
if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST))
|
||||
@ -2589,6 +2627,7 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
|
||||
unsigned short vid)
|
||||
{
|
||||
struct batadv_hashtable *hash = bat_priv->tt.global_hash;
|
||||
struct batadv_tt_orig_list_entry *tt_orig;
|
||||
struct batadv_tt_common_entry *tt_common;
|
||||
struct batadv_tt_global_entry *tt_global;
|
||||
struct hlist_head *head;
|
||||
@ -2627,8 +2666,9 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
|
||||
/* find out if this global entry is announced by this
|
||||
* originator
|
||||
*/
|
||||
if (!batadv_tt_global_entry_has_orig(tt_global,
|
||||
orig_node))
|
||||
tt_orig = batadv_tt_global_orig_entry_find(tt_global,
|
||||
orig_node);
|
||||
if (!tt_orig)
|
||||
continue;
|
||||
|
||||
/* use network order to read the VID: this ensures that
|
||||
@ -2640,10 +2680,12 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
|
||||
/* compute the CRC on flags that have to be kept in sync
|
||||
* among nodes
|
||||
*/
|
||||
flags = tt_common->flags & BATADV_TT_SYNC_MASK;
|
||||
flags = tt_orig->flags;
|
||||
crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags));
|
||||
|
||||
crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN);
|
||||
|
||||
batadv_tt_orig_list_entry_put(tt_orig);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
@ -1260,6 +1260,7 @@ struct batadv_tt_global_entry {
|
||||
* struct batadv_tt_orig_list_entry - orig node announcing a non-mesh client
|
||||
* @orig_node: pointer to orig node announcing this non-mesh client
|
||||
* @ttvn: translation table version number which added the non-mesh client
|
||||
* @flags: per orig entry TT sync flags
|
||||
* @list: list node for batadv_tt_global_entry::orig_list
|
||||
* @refcount: number of contexts the object is used
|
||||
* @rcu: struct used for freeing in an RCU-safe manner
|
||||
@ -1267,6 +1268,7 @@ struct batadv_tt_global_entry {
|
||||
struct batadv_tt_orig_list_entry {
|
||||
struct batadv_orig_node *orig_node;
|
||||
u8 ttvn;
|
||||
u8 flags;
|
||||
struct hlist_node list;
|
||||
struct kref refcount;
|
||||
struct rcu_head rcu;
|
||||
|
@ -2739,7 +2739,7 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
|
||||
{
|
||||
if (tx_path)
|
||||
return skb->ip_summed != CHECKSUM_PARTIAL &&
|
||||
skb->ip_summed != CHECKSUM_NONE;
|
||||
skb->ip_summed != CHECKSUM_UNNECESSARY;
|
||||
|
||||
return skb->ip_summed == CHECKSUM_NONE;
|
||||
}
|
||||
|
@ -1523,9 +1523,17 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
|
||||
int taglen;
|
||||
|
||||
for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) {
|
||||
if (optptr[0] == IPOPT_CIPSO)
|
||||
switch (optptr[0]) {
|
||||
case IPOPT_CIPSO:
|
||||
return optptr;
|
||||
taglen = optptr[1];
|
||||
case IPOPT_END:
|
||||
return NULL;
|
||||
case IPOPT_NOOP:
|
||||
taglen = 1;
|
||||
break;
|
||||
default:
|
||||
taglen = optptr[1];
|
||||
}
|
||||
optlen -= taglen;
|
||||
optptr += taglen;
|
||||
}
|
||||
|
@ -450,6 +450,7 @@ out_unlock:
|
||||
out:
|
||||
NAPI_GRO_CB(skb)->flush |= flush;
|
||||
skb_gro_remcsum_cleanup(skb, &grc);
|
||||
skb->remcsum_offload = 0;
|
||||
|
||||
return pp;
|
||||
}
|
||||
|
@ -107,6 +107,7 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
#define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */
#define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
#define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */
#define FLAG_SET_XMIT_TIMER 0x1000 /* Set TLP or RTO timer */
#define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */
#define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */
#define FLAG_NO_CHALLENGE_ACK 0x8000 /* do not call tcp_send_challenge_ack() */

@ -2520,8 +2521,8 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
		return;

	/* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
	if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
	    (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
	if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
	    (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
		tp->snd_cwnd = tp->snd_ssthresh;
		tp->snd_cwnd_stamp = tcp_jiffies32;
	}

@ -3004,10 +3005,7 @@ void tcp_rearm_rto(struct sock *sk)
	/* Offset the time elapsed after installing regular RTO */
	if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		struct sk_buff *skb = tcp_write_queue_head(sk);
		u64 rto_time_stamp = skb->skb_mstamp +
				     jiffies_to_usecs(rto);
		s64 delta_us = rto_time_stamp - tp->tcp_mstamp;
		s64 delta_us = tcp_rto_delta_us(sk);
		/* delta_us may not be positive if the socket is locked
		 * when the retrans timer fires and is rescheduled.
		 */

@ -3019,6 +3017,13 @@ void tcp_rearm_rto(struct sock *sk)
	}
}

/* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */
static void tcp_set_xmit_timer(struct sock *sk)
{
	if (!tcp_schedule_loss_probe(sk))
		tcp_rearm_rto(sk);
}

/* If we get here, the whole TSO packet has not been acked. */
static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
{

@ -3180,7 +3185,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
					ca_rtt_us, sack->rate);

	if (flag & FLAG_ACKED) {
		tcp_rearm_rto(sk);
		flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */
		if (unlikely(icsk->icsk_mtup.probe_size &&
			     !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
			tcp_mtup_probe_success(sk);

@ -3208,7 +3213,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
		 * after when the head was last (re)transmitted. Otherwise the
		 * timeout may continue to extend in loss recovery.
		 */
		tcp_rearm_rto(sk);
		flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */
	}

	if (icsk->icsk_ca_ops->pkts_acked) {

@ -3580,9 +3585,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
	if (after(ack, tp->snd_nxt))
		goto invalid_ack;

	if (icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
		tcp_rearm_rto(sk);

	if (after(ack, prior_snd_una)) {
		flag |= FLAG_SND_UNA_ADVANCED;
		icsk->icsk_retransmits = 0;

@ -3647,18 +3649,20 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
	flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked,
				    &sack_state);

	if (tp->tlp_high_seq)
		tcp_process_tlp_ack(sk, ack, flag);
	/* If needed, reset TLP/RTO timer; RACK may later override this. */
	if (flag & FLAG_SET_XMIT_TIMER)
		tcp_set_xmit_timer(sk);

	if (tcp_ack_is_dubious(sk, flag)) {
		is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
		tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
	}
	if (tp->tlp_high_seq)
		tcp_process_tlp_ack(sk, ack, flag);

	if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
		sk_dst_confirm(sk);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS)
		tcp_schedule_loss_probe(sk);
	delivered = tp->delivered - delivered; /* freshly ACKed or SACKed */
	lost = tp->lost - lost; /* freshly marked lost */
	tcp_rate_gen(sk, delivered, lost, sack_state.rate);
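Taken together, these ACK-path hunks replace several scattered tcp_rearm_rto() calls with a FLAG_SET_XMIT_TIMER bit that is acted on once, late in tcp_ack(), so loss detection can still run and override the timer decision. Reduced to a hedged sketch with invented names (not the kernel API), the pattern is: record during ACK processing that the timer needs refreshing, then arm it exactly once after the bookkeeping is done:

#include <stdbool.h>
#include <stdio.h>

#define FLAG_SET_XMIT_TIMER 0x1000   /* same idea as the new flag above */

struct fake_sock { bool tlp_scheduled; };

/* Stand-ins for tcp_schedule_loss_probe() / tcp_rearm_rto(). */
static bool schedule_loss_probe(struct fake_sock *sk) { return sk->tlp_scheduled = true; }
static void rearm_rto(struct fake_sock *sk) { (void)sk; puts("RTO armed"); }

/* Mirrors tcp_set_xmit_timer(): prefer a loss probe, fall back to the RTO. */
static void set_xmit_timer(struct fake_sock *sk)
{
	if (!schedule_loss_probe(sk))
		rearm_rto(sk);
}

static void ack_processing(struct fake_sock *sk)
{
	int flag = 0;

	/* ...clean the retransmit queue, note that a timer refresh is due... */
	flag |= FLAG_SET_XMIT_TIMER;

	/* ...loss detection / recovery may still run here and change state... */

	if (flag & FLAG_SET_XMIT_TIMER)   /* armed once, at the end */
		set_xmit_timer(sk);
}

int main(void)
{
	struct fake_sock sk = { false };

	ack_processing(&sk);
	printf("loss probe scheduled: %d\n", sk.tlp_scheduled);
	return 0;
}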
@ -2377,24 +2377,15 @@ bool tcp_schedule_loss_probe(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout, tlp_time_stamp, rto_time_stamp;
	u32 rtt = usecs_to_jiffies(tp->srtt_us >> 3);
	u32 timeout, rto_delta_us;

	/* No consecutive loss probes. */
	if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {
		tcp_rearm_rto(sk);
		return false;
	}
	/* Don't do any loss probe on a Fast Open connection before 3WHS
	 * finishes.
	 */
	if (tp->fastopen_rsk)
		return false;

	/* TLP is only scheduled when next timer event is RTO. */
	if (icsk->icsk_pending != ICSK_TIME_RETRANS)
		return false;

	/* Schedule a loss probe in 2*RTT for SACK capable connections
	 * in Open state, that are either limited by cwnd or application.
	 */

@ -2417,14 +2408,10 @@ bool tcp_schedule_loss_probe(struct sock *sk)
		  (rtt + (rtt >> 1) + TCP_DELACK_MAX));
	timeout = max_t(u32, timeout, msecs_to_jiffies(10));

	/* If RTO is shorter, just schedule TLP in its place. */
	tlp_time_stamp = tcp_jiffies32 + timeout;
	rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
	if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
		s32 delta = rto_time_stamp - tcp_jiffies32;
		if (delta > 0)
			timeout = delta;
	}
	/* If the RTO formula yields an earlier time, then use that time. */
	rto_delta_us = tcp_rto_delta_us(sk); /* How far in future is RTO? */
	if (rto_delta_us > 0)
		timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));

	inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
				  TCP_RTO_MAX);

@ -3449,6 +3436,10 @@ int tcp_connect(struct sock *sk)
	int err;

	tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB);

	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
		return -EHOSTUNREACH; /* Routing failure or similar. */

	tcp_connect_init(sk);

	if (unlikely(tp->repair)) {
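In tcp_schedule_loss_probe() the jiffies-based comparison against icsk_timeout is replaced by a microsecond delta (tcp_rto_delta_us()), and the tail-loss-probe timeout is simply clamped so it never fires after the RTO would. A short sketch of that clamping with made-up numbers and helper names, assuming 1 jiffy equals 1 ms for readability:

#include <stdint.h>
#include <stdio.h>

/* Assumes HZ == 1000, i.e. one jiffy per millisecond (illustration only). */
static uint32_t usecs_to_jiffies(int64_t us) { return (uint32_t)(us / 1000); }

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

int main(void)
{
	uint32_t timeout = 48;                 /* 2*RTT-ish TLP timeout, jiffies */
	int64_t rto_delta_us = 30 * 1000;      /* pending RTO fires in 30 ms     */

	/* If the RTO formula yields an earlier time, use that time instead. */
	if (rto_delta_us > 0)
		timeout = min_u32(timeout, usecs_to_jiffies(rto_delta_us));

	printf("arm loss probe in %u jiffies\n", timeout);   /* prints 30 */
	return 0;
}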
@ -652,7 +652,8 @@ static void tcp_keepalive_timer (unsigned long data)
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
		goto out;

	elapsed = keepalive_time_when(tp);
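The keepalive change widens a single equality test into a state mask, so both TCP_CLOSE and TCP_SYN_SENT now bail out. The (1 << state) & MASK idiom tests membership in a small set of states with one AND; a quick illustration with invented state numbers (not the real TCP state values):

#include <stdio.h>

/* Hypothetical state numbering, just for the illustration. */
enum { ST_ESTABLISHED = 1, ST_SYN_SENT = 2, ST_CLOSE = 7 };

#define STF(s)              (1 << (s))
#define KEEPALIVE_SKIP_MASK (STF(ST_CLOSE) | STF(ST_SYN_SENT))

static int keepalive_should_skip(int state)
{
	/* One AND covers every state in the mask instead of chained == tests. */
	return (1 << state) & KEEPALIVE_SKIP_MASK;
}

int main(void)
{
	printf("%d %d %d\n",
	       !!keepalive_should_skip(ST_CLOSE),        /* 1 */
	       !!keepalive_should_skip(ST_SYN_SENT),     /* 1 */
	       !!keepalive_should_skip(ST_ESTABLISHED)); /* 0 */
	return 0;
}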
@ -235,7 +235,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	/* If there is no outer header we can fake a checksum offload
	 * due to the fact that we have already done the checksum in
@ -2351,6 +2351,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
	if (on_link)
		nrt->rt6i_flags &= ~RTF_GATEWAY;

	nrt->rt6i_protocol = RTPROT_REDIRECT;
	nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;

	if (ip6_ins_rt(nrt))

@ -2461,6 +2462,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
		.fc_dst_len = prefixlen,
		.fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
			    RTF_UP | RTF_PREF(pref),
		.fc_protocol = RTPROT_RA,
		.fc_nlinfo.portid = 0,
		.fc_nlinfo.nlh = NULL,
		.fc_nlinfo.nl_net = net,

@ -2513,6 +2515,7 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
		.fc_ifindex = dev->ifindex,
		.fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
			    RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
		.fc_protocol = RTPROT_RA,
		.fc_nlinfo.portid = 0,
		.fc_nlinfo.nlh = NULL,
		.fc_nlinfo.nl_net = dev_net(dev),

@ -3424,14 +3427,6 @@ static int rt6_fill_node(struct net *net,
	rtm->rtm_flags = 0;
	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
	rtm->rtm_protocol = rt->rt6i_protocol;
	if (rt->rt6i_flags & RTF_DYNAMIC)
		rtm->rtm_protocol = RTPROT_REDIRECT;
	else if (rt->rt6i_flags & RTF_ADDRCONF) {
		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO))
			rtm->rtm_protocol = RTPROT_RA;
		else
			rtm->rtm_protocol = RTPROT_KERNEL;
	}

	if (rt->rt6i_flags & RTF_CACHE)
		rtm->rtm_flags |= RTM_F_CLONED;
@ -72,7 +72,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	/* If there is no outer header we can fake a checksum offload
	 * due to the fact that we have already done the checksum in
@ -1015,8 +1015,10 @@ void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
	if (rds_ib_ring_empty(&ic->i_recv_ring))
		rds_ib_stats_inc(s_ib_rx_ring_empty);

	if (rds_ib_ring_low(&ic->i_recv_ring))
	if (rds_ib_ring_low(&ic->i_recv_ring)) {
		rds_ib_recv_refill(conn, 0, GFP_NOWAIT);
		rds_ib_stats_inc(s_ib_rx_refill_from_cq);
	}
}

int rds_ib_recv_path(struct rds_conn_path *cp)

@ -1029,6 +1031,7 @@ int rds_ib_recv_path(struct rds_conn_path *cp)
	if (rds_conn_up(conn)) {
		rds_ib_attempt_ack(ic);
		rds_ib_recv_refill(conn, 0, GFP_KERNEL);
		rds_ib_stats_inc(s_ib_rx_refill_from_thread);
	}

	return ret;
@ -36,8 +36,8 @@ static struct tc_action_ops act_ipt_ops;
static unsigned int xt_net_id;
static struct tc_action_ops act_xt_ops;

static int ipt_init_target(struct xt_entry_target *t, char *table,
			   unsigned int hook)
static int ipt_init_target(struct net *net, struct xt_entry_target *t,
			   char *table, unsigned int hook)
{
	struct xt_tgchk_param par;
	struct xt_target *target;

@ -49,6 +49,7 @@ static int ipt_init_target(struct xt_entry_target *t, char *table,
		return PTR_ERR(target);

	t->u.kernel.target = target;
	par.net = net;
	par.table = table;
	par.entryinfo = NULL;
	par.target = target;

@ -91,10 +92,11 @@ static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
	[TCA_IPT_TARG] = { .len = sizeof(struct xt_entry_target) },
};

static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,
static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
			  struct nlattr *est, struct tc_action **a,
			  const struct tc_action_ops *ops, int ovr, int bind)
{
	struct tc_action_net *tn = net_generic(net, id);
	struct nlattr *tb[TCA_IPT_MAX + 1];
	struct tcf_ipt *ipt;
	struct xt_entry_target *td, *t;

@ -159,7 +161,7 @@ static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,
	if (unlikely(!t))
		goto err2;

	err = ipt_init_target(t, tname, hook);
	err = ipt_init_target(net, t, tname, hook);
	if (err < 0)
		goto err3;

@ -193,18 +195,16 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a, int ovr,
		       int bind)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	return __tcf_ipt_init(tn, nla, est, a, &act_ipt_ops, ovr, bind);
	return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr,
			      bind);
}

static int tcf_xt_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a, int ovr,
		       int bind)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	return __tcf_ipt_init(tn, nla, est, a, &act_xt_ops, ovr, bind);
	return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr,
			      bind);
}

static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
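The act_ipt hunks thread the caller's struct net down into ipt_init_target() so par.net is populated before the xtables check hook runs. Stripped of the tc and xtables specifics, the pattern is just passing a context pointer one level deeper and storing it in the parameter block the callee consumes; a hedged, generic sketch with hypothetical types (not the kernel API):

#include <stdio.h>

struct net { int id; };                       /* opaque stand-in */

struct chk_param {
	struct net *net;                      /* must not be left NULL */
	const char *table;
};

/* Mirrors the shape of the fixed ipt_init_target(): the caller's namespace
 * is an explicit argument and is stored before anything dereferences it. */
static int init_target(struct net *net, const char *table, struct chk_param *par)
{
	par->net   = net;
	par->table = table;
	return par->net ? 0 : -1;
}

int main(void)
{
	struct net net = { .id = 1 };
	struct chk_param par = { 0 };
	int ret = init_target(&net, "mangle", &par);

	printf("init_target: %d (net id %d)\n", ret, par.net->id);
	return 0;
}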
@ -11,6 +11,8 @@
# define __NR_bpf 280
# elif defined(__sparc__)
# define __NR_bpf 349
# elif defined(__s390__)
# define __NR_bpf 351
# else
# error __NR_bpf not defined. libbpf does not support your arch.
# endif

@ -39,6 +39,8 @@
# define __NR_bpf 280
# elif defined(__sparc__)
# define __NR_bpf 349
# elif defined(__s390__)
# define __NR_bpf 351
# else
# error __NR_bpf not defined. libbpf does not support your arch.
# endif
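These two hunks add the s390 syscall number so libbpf and the selftests can issue bpf(2) on that architecture; everything around them funnels the call through syscall(). A hedged, self-contained version of that wrapper looks roughly like this, with the fallback numbers present only for illustration (modern <sys/syscall.h> already defines __NR_bpf):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_bpf
# if defined(__x86_64__)
#  define __NR_bpf 321
# elif defined(__s390__)
#  define __NR_bpf 351          /* the number the hunks above add */
# else
#  error __NR_bpf not defined for this illustration
# endif
#endif

/* bpf(2) has no libc wrapper on older systems, so invoke it directly.
 * Passing a NULL attr is deliberately bogus; the point is only to show
 * that the syscall number resolves and the kernel answers with an error. */
int main(void)
{
	long ret = syscall(__NR_bpf, 0 /* BPF_MAP_CREATE */, NULL, 0);

	printf("bpf() returned %ld\n", ret);
	return 0;
}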
@ -12,12 +12,23 @@

int _version SEC("version") = 1;

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define TEST_FIELD(TYPE, FIELD, MASK) \
	{ \
		TYPE tmp = *(volatile TYPE *)&skb->FIELD; \
		if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK)) \
			return TC_ACT_SHOT; \
	}
#else
#define TEST_FIELD_OFFSET(a, b) ((sizeof(a) - sizeof(b)) / sizeof(b))
#define TEST_FIELD(TYPE, FIELD, MASK) \
	{ \
		TYPE tmp = *((volatile TYPE *)&skb->FIELD + \
			      TEST_FIELD_OFFSET(skb->FIELD, TYPE)); \
		if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK)) \
			return TC_ACT_SHOT; \
	}
#endif

SEC("test1")
int process(struct __sk_buff *skb)
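The new #else branch above handles big-endian targets: when a program reads a narrow TYPE out of a 32-bit __sk_buff field, the low-order bytes sit at the high end of the word, so the pointer must be advanced by (sizeof(word) - sizeof(TYPE)) / sizeof(TYPE) elements. A tiny host-side demonstration of that offset arithmetic (plain C, not a BPF program):

#include <stdint.h>
#include <stdio.h>

#define FIELD_OFFSET(word, narrow) ((sizeof(word) - sizeof(narrow)) / sizeof(narrow))

int main(void)
{
	uint32_t word = 0x11223344;
	uint8_t *bytes = (uint8_t *)&word;
	/* On big-endian the least-significant byte (0x44) sits at index 3,
	 * which is exactly FIELD_OFFSET(word, uint8_t) = (4 - 1) / 1; on
	 * little-endian it sits at index 0, which is why the adjustment only
	 * appears in the big-endian branch of the macro. */
	int low_at = (bytes[0] == 0x44) ? 0 : 3;

	printf("offset for a byte read: %zu\n", FIELD_OFFSET(word, uint8_t));  /* 3 */
	printf("offset for a half read: %zu\n", FIELD_OFFSET(word, uint16_t)); /* 1 */
	printf("least-significant byte found at index %d on this host\n", low_at);
	return 0;
}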
@ -8,6 +8,7 @@
 * License as published by the Free Software Foundation.
 */

#include <endian.h>
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>

@ -1098,7 +1099,7 @@ static struct bpf_test tests[] = {
	"check skb->hash byte load permitted",
	.insns = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
#ifdef __LITTLE_ENDIAN
#if __BYTE_ORDER == __LITTLE_ENDIAN
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, hash)),
#else

@ -1135,7 +1136,7 @@ static struct bpf_test tests[] = {
	"check skb->hash byte load not permitted 3",
	.insns = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
#ifdef __LITTLE_ENDIAN
#if __BYTE_ORDER == __LITTLE_ENDIAN
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, hash) + 3),
#else

@ -1244,7 +1245,7 @@ static struct bpf_test tests[] = {
	"check skb->hash half load permitted",
	.insns = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
#ifdef __LITTLE_ENDIAN
#if __BYTE_ORDER == __LITTLE_ENDIAN
		BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, hash)),
#else

@ -1259,7 +1260,7 @@ static struct bpf_test tests[] = {
	"check skb->hash half load not permitted",
	.insns = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
#ifdef __LITTLE_ENDIAN
#if __BYTE_ORDER == __LITTLE_ENDIAN
		BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, hash) + 2),
#else

@ -5422,7 +5423,7 @@ static struct bpf_test tests[] = {
	"check bpf_perf_event_data->sample_period byte load permitted",
	.insns = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
#ifdef __LITTLE_ENDIAN
#if __BYTE_ORDER == __LITTLE_ENDIAN
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct bpf_perf_event_data, sample_period)),
#else

@ -5438,7 +5439,7 @@ static struct bpf_test tests[] = {
	"check bpf_perf_event_data->sample_period half load permitted",
	.insns = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
#ifdef __LITTLE_ENDIAN
#if __BYTE_ORDER == __LITTLE_ENDIAN
		BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
			    offsetof(struct bpf_perf_event_data, sample_period)),
#else

@ -5454,7 +5455,7 @@ static struct bpf_test tests[] = {
	"check bpf_perf_event_data->sample_period word load permitted",
	.insns = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
#ifdef __LITTLE_ENDIAN
#if __BYTE_ORDER == __LITTLE_ENDIAN
		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
			    offsetof(struct bpf_perf_event_data, sample_period)),
#else

@ -5481,7 +5482,7 @@ static struct bpf_test tests[] = {
	"check skb->data half load not permitted",
	.insns = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
#ifdef __LITTLE_ENDIAN
#if __BYTE_ORDER == __LITTLE_ENDIAN
		BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
#else

@ -5497,7 +5498,7 @@ static struct bpf_test tests[] = {
	"check skb->tc_classid half load not permitted for lwt prog",
	.insns = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
#ifdef __LITTLE_ENDIAN
#if __BYTE_ORDER == __LITTLE_ENDIAN
		BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, tc_classid)),
#else
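All of the test_verifier hunks swap #ifdef __LITTLE_ENDIAN for #if __BYTE_ORDER == __LITTLE_ENDIAN and add #include <endian.h>. In userspace that header defines both __LITTLE_ENDIAN and __BIG_ENDIAN as numeric constants on every architecture, so an #ifdef test is always true and the byte-order-dependent cases silently took the wrong branch on big-endian hosts such as s390x. A throwaway check of the portable idiom:

#include <endian.h>
#include <stdio.h>

int main(void)
{
	/* Both macros exist regardless of the host, so #ifdef proves nothing. */
	printf("__LITTLE_ENDIAN=%d __BIG_ENDIAN=%d __BYTE_ORDER=%d\n",
	       __LITTLE_ENDIAN, __BIG_ENDIAN, __BYTE_ORDER);

#if __BYTE_ORDER == __LITTLE_ENDIAN
	puts("compiled for a little-endian target");
#else
	puts("compiled for a big-endian target");
#endif
	return 0;
}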