Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Merge in the left-over fixes before the net-next pull-request.

Conflicts:

drivers/net/ethernet/mediatek/mtk_ppe.c
  ae3ed15da5 ("net: ethernet: mtk_eth_soc: fix state in __mtk_foe_entry_clear")
  9d8cb4c096 ("net: ethernet: mtk_eth_soc: add foe_entry_size to mtk_eth_soc")
  https://lore.kernel.org/all/6cb6893b-4921-a068-4c30-1109795110bb@tessares.net/

kernel/bpf/helpers.c
  8addbfc7b3 ("bpf: Gate dynptr API behind CAP_BPF")
  5679ff2f13 ("bpf: Move bpf_loop and bpf_for_each_map_elem under CAP_BPF")
  8a67f2de9b ("bpf: expose bpf_strtol and bpf_strtoul to all program types")
  https://lore.kernel.org/all/20221003201957.13149-1-daniel@iogearbox.net/

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit e52f7c1ddf
@@ -3833,6 +3833,7 @@ F: kernel/bpf/dispatcher.c
 F: kernel/bpf/trampoline.c
 F: include/linux/bpf*
 F: include/linux/filter.h
+F: include/linux/tnum.h

 BPF [BTF]
 M: Martin KaFai Lau <martin.lau@linux.dev>
@@ -59,6 +59,7 @@ struct l1oip {
 int bundle; /* bundle channels in one frm */
 int codec; /* codec to use for transmis. */
 int limit; /* limit number of bchannels */
+bool shutdown; /* if card is released */

 /* timer */
 struct timer_list keep_tl;
@@ -275,7 +275,7 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask,
 p = frame;

 /* restart timer */
-if (time_before(hc->keep_tl.expires, jiffies + 5 * HZ))
+if (time_before(hc->keep_tl.expires, jiffies + 5 * HZ) && !hc->shutdown)
 mod_timer(&hc->keep_tl, jiffies + L1OIP_KEEPALIVE * HZ);
 else
 hc->keep_tl.expires = jiffies + L1OIP_KEEPALIVE * HZ;
@@ -601,7 +601,9 @@ multiframe:
 goto multiframe;

 /* restart timer */
-if (time_before(hc->timeout_tl.expires, jiffies + 5 * HZ) || !hc->timeout_on) {
+if ((time_before(hc->timeout_tl.expires, jiffies + 5 * HZ) ||
+!hc->timeout_on) &&
+!hc->shutdown) {
 hc->timeout_on = 1;
 mod_timer(&hc->timeout_tl, jiffies + L1OIP_TIMEOUT * HZ);
 } else /* only adjust timer */
@@ -1232,11 +1234,10 @@ release_card(struct l1oip *hc)
 {
 int ch;

-if (timer_pending(&hc->keep_tl))
-del_timer(&hc->keep_tl);
+hc->shutdown = true;

-if (timer_pending(&hc->timeout_tl))
-del_timer(&hc->timeout_tl);
+del_timer_sync(&hc->keep_tl);
+del_timer_sync(&hc->timeout_tl);

 cancel_work_sync(&hc->workq);

@@ -1912,11 +1912,14 @@ static int alx_suspend(struct device *dev)

 if (!netif_running(alx->dev))
 return 0;
+
+rtnl_lock();
 netif_device_detach(alx->dev);

 mutex_lock(&alx->mtx);
 __alx_stop(alx);
 mutex_unlock(&alx->mtx);
+rtnl_unlock();

 return 0;
 }
@@ -1927,6 +1930,7 @@ static int alx_resume(struct device *dev)
 struct alx_hw *hw = &alx->hw;
 int err;

+rtnl_lock();
 mutex_lock(&alx->mtx);
 alx_reset_phy(hw);

@@ -1943,6 +1947,7 @@ static int alx_resume(struct device *dev)

 unlock:
 mutex_unlock(&alx->mtx);
+rtnl_unlock();
 return err;
 }

@@ -787,6 +787,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
 pad, len, fp->rx_buf_size);
 bnx2x_panic();
+bnx2x_frag_free(fp, new_data);
 return;
 }
 #endif
@@ -1530,6 +1530,7 @@ u32 mvpp2_read(struct mvpp2 *priv, u32 offset);
 void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name);

 void mvpp2_dbgfs_cleanup(struct mvpp2 *priv);
+void mvpp2_dbgfs_exit(void);

 void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en);

@@ -691,6 +691,13 @@ static int mvpp2_dbgfs_port_init(struct dentry *parent,
 return 0;
 }

+static struct dentry *mvpp2_root;
+
+void mvpp2_dbgfs_exit(void)
+{
+debugfs_remove(mvpp2_root);
+}
+
 void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
 {
 debugfs_remove_recursive(priv->dbgfs_dir);
@@ -700,10 +707,9 @@ void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)

 void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
 {
-struct dentry *mvpp2_dir, *mvpp2_root;
+struct dentry *mvpp2_dir;
 int ret, i;

-mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL);
 if (!mvpp2_root)
 mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL);

@@ -7704,7 +7704,18 @@ static struct platform_driver mvpp2_driver = {
 },
 };

-module_platform_driver(mvpp2_driver);
+static int __init mvpp2_driver_init(void)
+{
+return platform_driver_register(&mvpp2_driver);
+}
+module_init(mvpp2_driver_init);
+
+static void __exit mvpp2_driver_exit(void)
+{
+platform_driver_unregister(&mvpp2_driver);
+mvpp2_dbgfs_exit();
+}
+module_exit(mvpp2_driver_exit);

 MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
 MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
@@ -185,10 +185,14 @@ err_rhashtable_init:
 return ERR_PTR(err);
 }

-void prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset,
+int prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset,
 void *keymask)
 {
 ruleset->keymask = kmemdup(keymask, ACL_KEYMASK_SIZE, GFP_KERNEL);
+if (!ruleset->keymask)
+return -ENOMEM;
+
+return 0;
 }

 int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset)
@@ -185,8 +185,8 @@ struct prestera_acl_ruleset *
 prestera_acl_ruleset_lookup(struct prestera_acl *acl,
 struct prestera_flow_block *block,
 u32 chain_index);
-void prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset,
+int prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset,
 void *keymask);
 bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset);
 int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset);
 void prestera_acl_ruleset_put(struct prestera_acl_ruleset *ruleset);
@@ -500,7 +500,9 @@ int prestera_flower_tmplt_create(struct prestera_flow_block *block,
 }

 /* preserve keymask/template to this ruleset */
-prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask);
+err = prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask);
+if (err)
+goto err_ruleset_keymask_set;

 /* skip error, as it is not possible to reject template operation,
 * so, keep the reference to the ruleset for rules to be added
@@ -516,6 +518,8 @@ int prestera_flower_tmplt_create(struct prestera_flow_block *block,
 list_add_rcu(&template->list, &block->template_list);
 return 0;

+err_ruleset_keymask_set:
+prestera_acl_ruleset_put(ruleset);
 err_ruleset_get:
 kfree(template);
 err_malloc:
@@ -442,7 +442,7 @@ __mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);

 hwe->ib1 &= ~MTK_FOE_IB1_STATE;
-hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_UNBIND);
+hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
 dma_wmb();
 }
 entry->hash = 0xffff;
@@ -1049,6 +1049,10 @@ static int lan743x_ptpci_verify_pin_config(struct ptp_clock_info *ptp,
 enum ptp_pin_function func,
 unsigned int chan)
 {
+struct lan743x_ptp *lan_ptp =
+container_of(ptp, struct lan743x_ptp, ptp_clock_info);
+struct lan743x_adapter *adapter =
+container_of(lan_ptp, struct lan743x_adapter, ptp);
 int result = 0;

 /* Confirm the requested function is supported. Parameter
@@ -1057,7 +1061,10 @@ static int lan743x_ptpci_verify_pin_config(struct ptp_clock_info *ptp,
 switch (func) {
 case PTP_PF_NONE:
 case PTP_PF_PEROUT:
+break;
 case PTP_PF_EXTTS:
+if (!adapter->is_pci11x1x)
+result = -1;
 break;
 case PTP_PF_PHYSYNC:
 default:
@@ -305,7 +305,7 @@ struct frame_info {
 void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp);
 void sparx5_ifh_parse(u32 *ifh, struct frame_info *info);
 irqreturn_t sparx5_xtr_handler(int irq, void *_priv);
-int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev);
 int sparx5_manual_injection_mode(struct sparx5 *sparx5);
 void sparx5_port_inj_timer_setup(struct sparx5_port *port);

@@ -222,13 +222,13 @@ static int sparx5_inject(struct sparx5 *sparx5,
 return NETDEV_TX_OK;
 }

-int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
 {
 struct net_device_stats *stats = &dev->stats;
 struct sparx5_port *port = netdev_priv(dev);
 struct sparx5 *sparx5 = port->sparx5;
 u32 ifh[IFH_LEN];
-int ret;
+netdev_tx_t ret;

 memset(ifh, 0, IFH_LEN * 4);
 sparx5_set_port_ifh(ifh, port->portno);
@@ -249,8 +249,8 @@ static int spl2sw_nvmem_get_mac_address(struct device *dev, struct device_node *

 /* Check if mac address is valid */
 if (!is_valid_ether_addr(mac)) {
-kfree(mac);
 dev_info(dev, "Invalid mac address in nvmem (%pM)!\n", mac);
+kfree(mac);
 return -EINVAL;
 }

@@ -1875,7 +1875,9 @@ static void intr_callback(struct urb *urb)
 "Stop submitting intr, status %d\n", status);
 return;
 case -EOVERFLOW:
-netif_info(tp, intr, tp->netdev, "intr status -EOVERFLOW\n");
+if (net_ratelimit())
+netif_info(tp, intr, tp->netdev,
+"intr status -EOVERFLOW\n");
 goto resubmit;
 /* -EPIPE: should clear the halt */
 default:
@@ -323,15 +323,16 @@ struct iosm_wwan *ipc_wwan_init(struct iosm_imem *ipc_imem, struct device *dev)
 ipc_wwan->dev = dev;
 ipc_wwan->ipc_imem = ipc_imem;

+mutex_init(&ipc_wwan->if_mutex);
+
 /* WWAN core will create a netdev for the default IP MUX channel */
 if (wwan_register_ops(ipc_wwan->dev, &iosm_wwan_ops, ipc_wwan,
 IP_MUX_SESSION_DEFAULT)) {
+mutex_destroy(&ipc_wwan->if_mutex);
 kfree(ipc_wwan);
 return NULL;
 }

-mutex_init(&ipc_wwan->if_mutex);
-
 return ipc_wwan;
 }

@@ -393,7 +393,7 @@ vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
 return NULL;
 }

-pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
+pkt->buf = kvmalloc(pkt->len, GFP_KERNEL);
 if (!pkt->buf) {
 kfree(pkt);
 return NULL;
@@ -295,7 +295,7 @@ struct tcp_sock {
 u32 packets_out; /* Packets which are "in flight" */
 u32 retrans_out; /* Retransmitted packets out */
 u32 max_packets_out; /* max packets_out in last window */
-u32 max_packets_seq; /* right edge of max_packets_out flight */
+u32 cwnd_usage_seq; /* right edge of cwnd usage tracking flight */

 u16 urg_data; /* Saved octet of OOB data and control flags */
 u8 ecn_flags; /* ECN status bits. */
@@ -1302,11 +1302,14 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
 {
 const struct tcp_sock *tp = tcp_sk(sk);

+if (tp->is_cwnd_limited)
+return true;
+
 /* If in slow start, ensure cwnd grows to twice what was ACKed. */
 if (tcp_in_slow_start(tp))
 return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out;

-return tp->is_cwnd_limited;
+return false;
 }

 /* BBR congestion control needs pacing.
@@ -95,7 +95,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
 struct xdp_umem *umem);
 int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
 u16 queue_id, u16 flags);
-int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
+int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
 struct net_device *dev, u16 queue_id);
 int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
 void xp_destroy(struct xsk_buff_pool *pool);
@@ -1259,7 +1259,7 @@ enum {

 /* Query effective (directly attached + inherited from ancestor cgroups)
 * programs that will be executed for events within a cgroup.
-* attach_flags with this flag are returned only for directly attached programs.
+* attach_flags with this flag are always returned 0.
 */
 #define BPF_F_QUERY_EFFECTIVE (1U << 0)

@@ -1458,7 +1458,10 @@ union bpf_attr {
 __u32 attach_flags;
 __aligned_u64 prog_ids;
 __u32 prog_cnt;
-__aligned_u64 prog_attach_flags; /* output: per-program attach_flags */
+/* output: per-program attach_flags.
+ * not allowed to be set during effective query.
+ */
+__aligned_u64 prog_attach_flags;
 } query;

 struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
@@ -3128,7 +3128,7 @@ static int btf_struct_resolve(struct btf_verifier_env *env,
 if (v->next_member) {
 const struct btf_type *last_member_type;
 const struct btf_member *last_member;
-u16 last_member_type_id;
+u32 last_member_type_id;

 last_member = btf_type_member(v->t) + v->next_member - 1;
 last_member_type_id = last_member->type;
@@ -1020,6 +1020,7 @@ static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
 union bpf_attr __user *uattr)
 {
 __u32 __user *prog_attach_flags = u64_to_user_ptr(attr->query.prog_attach_flags);
+bool effective_query = attr->query.query_flags & BPF_F_QUERY_EFFECTIVE;
 __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
 enum bpf_attach_type type = attr->query.attach_type;
 enum cgroup_bpf_attach_type from_atype, to_atype;
@@ -1029,8 +1030,12 @@ static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
 int total_cnt = 0;
 u32 flags;

+if (effective_query && prog_attach_flags)
+return -EINVAL;
+
 if (type == BPF_LSM_CGROUP) {
-if (attr->query.prog_cnt && prog_ids && !prog_attach_flags)
+if (!effective_query && attr->query.prog_cnt &&
+prog_ids && !prog_attach_flags)
 return -EINVAL;

 from_atype = CGROUP_LSM_START;
@@ -1045,7 +1050,7 @@ static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
 }

 for (atype = from_atype; atype <= to_atype; atype++) {
-if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
+if (effective_query) {
 effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
 lockdep_is_held(&cgroup_mutex));
 total_cnt += bpf_prog_array_length(effective);
@@ -1054,6 +1059,8 @@ static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
 }
 }

+/* always output uattr->query.attach_flags as 0 during effective query */
+flags = effective_query ? 0 : flags;
 if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
 return -EFAULT;
 if (copy_to_user(&uattr->query.prog_cnt, &total_cnt, sizeof(total_cnt)))
@@ -1068,7 +1075,7 @@ static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
 }

 for (atype = from_atype; atype <= to_atype && total_cnt; atype++) {
-if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
+if (effective_query) {
 effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
 lockdep_is_held(&cgroup_mutex));
 cnt = min_t(int, bpf_prog_array_length(effective), total_cnt);
@@ -1090,15 +1097,16 @@ static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
 if (++i == cnt)
 break;
 }
-}

 if (prog_attach_flags) {
 flags = cgrp->bpf.flags[atype];

 for (i = 0; i < cnt; i++)
-if (copy_to_user(prog_attach_flags + i, &flags, sizeof(flags)))
-return -EFAULT;
-prog_attach_flags += cnt;
+if (copy_to_user(prog_attach_flags + i,
+&flags, sizeof(flags)))
+return -EFAULT;
+prog_attach_flags += cnt;
+}
 }

 prog_ids += cnt;
@@ -1609,26 +1609,12 @@ bpf_base_func_proto(enum bpf_func_id func_id)
 return &bpf_ringbuf_discard_proto;
 case BPF_FUNC_ringbuf_query:
 return &bpf_ringbuf_query_proto;
-case BPF_FUNC_ringbuf_reserve_dynptr:
-return &bpf_ringbuf_reserve_dynptr_proto;
-case BPF_FUNC_ringbuf_submit_dynptr:
-return &bpf_ringbuf_submit_dynptr_proto;
-case BPF_FUNC_ringbuf_discard_dynptr:
-return &bpf_ringbuf_discard_dynptr_proto;
 case BPF_FUNC_strncmp:
 return &bpf_strncmp_proto;
 case BPF_FUNC_strtol:
 return &bpf_strtol_proto;
 case BPF_FUNC_strtoul:
 return &bpf_strtoul_proto;
-case BPF_FUNC_dynptr_from_mem:
-return &bpf_dynptr_from_mem_proto;
-case BPF_FUNC_dynptr_read:
-return &bpf_dynptr_read_proto;
-case BPF_FUNC_dynptr_write:
-return &bpf_dynptr_write_proto;
-case BPF_FUNC_dynptr_data:
-return &bpf_dynptr_data_proto;
 default:
 break;
 }
@@ -1663,6 +1649,20 @@ bpf_base_func_proto(enum bpf_func_id func_id)
 return &bpf_loop_proto;
 case BPF_FUNC_user_ringbuf_drain:
 return &bpf_user_ringbuf_drain_proto;
+case BPF_FUNC_ringbuf_reserve_dynptr:
+return &bpf_ringbuf_reserve_dynptr_proto;
+case BPF_FUNC_ringbuf_submit_dynptr:
+return &bpf_ringbuf_submit_dynptr_proto;
+case BPF_FUNC_ringbuf_discard_dynptr:
+return &bpf_ringbuf_discard_dynptr_proto;
+case BPF_FUNC_dynptr_from_mem:
+return &bpf_dynptr_from_mem_proto;
+case BPF_FUNC_dynptr_read:
+return &bpf_dynptr_read_proto;
+case BPF_FUNC_dynptr_write:
+return &bpf_dynptr_write_proto;
+case BPF_FUNC_dynptr_data:
+return &bpf_dynptr_data_proto;
 default:
 break;
 }
@@ -4405,7 +4405,9 @@ static int bpf_task_fd_query(const union bpf_attr *attr,
 if (attr->task_fd_query.flags != 0)
 return -EINVAL;

+rcu_read_lock();
 task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
+rcu_read_unlock();
 if (!task)
 return -ENOENT;

@@ -251,6 +251,9 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 return -EOPNOTSUPP;
 }

+if (!size)
+return -EINVAL;
+
 lock_sock(sk);
 if (!sk->sk_bound_dev_if)
 dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154);
@@ -110,7 +110,10 @@ static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
 struct sk_buff *skb,
 netdev_features_t features)
 {
-return skb_eth_gso_segment(skb, features, htons(ETH_P_IP));
+__be16 type = x->inner_mode.family == AF_INET6 ? htons(ETH_P_IPV6)
+: htons(ETH_P_IP);
+
+return skb_eth_gso_segment(skb, features, type);
 }

 static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
@@ -3136,6 +3136,8 @@ int tcp_disconnect(struct sock *sk, int flags)
 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 tcp_snd_cwnd_set(tp, TCP_INIT_CWND);
 tp->snd_cwnd_cnt = 0;
+tp->is_cwnd_limited = 0;
+tp->max_packets_out = 0;
 tp->window_clamp = 0;
 tp->delivered = 0;
 tp->delivered_ce = 0;
@@ -1875,15 +1875,20 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
 struct tcp_sock *tp = tcp_sk(sk);

-/* Track the maximum number of outstanding packets in each
-* window, and remember whether we were cwnd-limited then.
+/* Track the strongest available signal of the degree to which the cwnd
+* is fully utilized. If cwnd-limited then remember that fact for the
+* current window. If not cwnd-limited then track the maximum number of
+* outstanding packets in the current window. (If cwnd-limited then we
+* chose to not update tp->max_packets_out to avoid an extra else
+* clause with no functional impact.)
 */
-if (!before(tp->snd_una, tp->max_packets_seq) ||
-tp->packets_out > tp->max_packets_out ||
-is_cwnd_limited) {
-tp->max_packets_out = tp->packets_out;
-tp->max_packets_seq = tp->snd_nxt;
+if (!before(tp->snd_una, tp->cwnd_usage_seq) ||
+is_cwnd_limited ||
+(!tp->is_cwnd_limited &&
+tp->packets_out > tp->max_packets_out)) {
 tp->is_cwnd_limited = is_cwnd_limited;
+tp->max_packets_out = tp->packets_out;
+tp->cwnd_usage_seq = tp->snd_nxt;
 }

 if (tcp_is_cwnd_limited(sk)) {
@@ -145,7 +145,10 @@ static struct sk_buff *xfrm6_tunnel_gso_segment(struct xfrm_state *x,
 struct sk_buff *skb,
 netdev_features_t features)
 {
-return skb_eth_gso_segment(skb, features, htons(ETH_P_IPV6));
+__be16 type = x->inner_mode.family == AF_INET ? htons(ETH_P_IP)
+: htons(ETH_P_IPV6);
+
+return skb_eth_gso_segment(skb, features, type);
 }

 static struct sk_buff *xfrm6_transport_gso_segment(struct xfrm_state *x,
@@ -166,10 +166,10 @@ void rds_tcp_reset_callbacks(struct socket *sock,
 */
 atomic_set(&cp->cp_state, RDS_CONN_RESETTING);
 wait_event(cp->cp_waitq, !test_bit(RDS_IN_XMIT, &cp->cp_flags));
-lock_sock(osock->sk);
 /* reset receive side state for rds_tcp_data_recv() for osock */
 cancel_delayed_work_sync(&cp->cp_send_w);
 cancel_delayed_work_sync(&cp->cp_recv_w);
+lock_sock(osock->sk);
 if (tc->t_tinc) {
 rds_inc_put(&tc->t_tinc->ti_inc);
 tc->t_tinc = NULL;
@@ -863,12 +863,17 @@ int sctp_auth_set_key(struct sctp_endpoint *ep,
 }

 list_del_init(&shkey->key_list);
-sctp_auth_shkey_release(shkey);
 list_add(&cur_key->key_list, sh_keys);

-if (asoc && asoc->active_key_id == auth_key->sca_keynumber)
-sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL);
+if (asoc && asoc->active_key_id == auth_key->sca_keynumber &&
+sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL)) {
+list_del_init(&cur_key->key_list);
+sctp_auth_shkey_release(cur_key);
+list_add(&shkey->key_list, sh_keys);
+return -ENOMEM;
+}

+sctp_auth_shkey_release(shkey);
 return 0;
 }

@@ -902,8 +907,13 @@ int sctp_auth_set_active_key(struct sctp_endpoint *ep,
 return -EINVAL;

 if (asoc) {
+__u16 active_key_id = asoc->active_key_id;
+
 asoc->active_key_id = key_id;
-sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL);
+if (sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL)) {
+asoc->active_key_id = active_key_id;
+return -ENOMEM;
+}
 } else
 ep->active_key_id = key_id;

@@ -569,12 +569,6 @@ static void unix_sock_destructor(struct sock *sk)

 skb_queue_purge(&sk->sk_receive_queue);

-#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
-if (u->oob_skb) {
-kfree_skb(u->oob_skb);
-u->oob_skb = NULL;
-}
-#endif
 DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
 DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
 DEBUG_NET_WARN_ON_ONCE(sk->sk_socket);
@@ -620,6 +614,13 @@ static void unix_release_sock(struct sock *sk, int embrion)

 unix_state_unlock(sk);

+#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
+if (u->oob_skb) {
+kfree_skb(u->oob_skb);
+u->oob_skb = NULL;
+}
+#endif
+
 wake_up_interruptible_all(&u->peer_wait);

 if (skpair != NULL) {
@@ -1339,7 +1339,7 @@ EXPORT_SYMBOL_GPL(virtio_transport_recv_pkt);

 void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt)
 {
-kfree(pkt->buf);
+kvfree(pkt->buf);
 kfree(pkt);
 }
 EXPORT_SYMBOL_GPL(virtio_transport_free_pkt);
@@ -355,16 +355,15 @@ static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entr
 return nb_pkts;
 }

-u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max_entries)
+u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts)
 {
 struct xdp_sock *xs;
-u32 nb_pkts;

 rcu_read_lock();
 if (!list_is_singular(&pool->xsk_tx_list)) {
 /* Fallback to the non-batched version */
 rcu_read_unlock();
-return xsk_tx_peek_release_fallback(pool, max_entries);
+return xsk_tx_peek_release_fallback(pool, nb_pkts);
 }

 xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
@@ -373,12 +372,7 @@ u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max_entries)
 goto out;
 }

-max_entries = xskq_cons_nb_entries(xs->tx, max_entries);
-nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, max_entries);
-if (!nb_pkts) {
-xs->tx->queue_empty_descs++;
-goto out;
-}
+nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts);

 /* This is the backpressure mechanism for the Tx path. Try to
 * reserve space in the completion queue for all packets, but
@@ -386,12 +380,18 @@ u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max_entries)
 * packets. This avoids having to implement any buffering in
 * the Tx path.
 */
-nb_pkts = xskq_prod_reserve_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
+nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts);
 if (!nb_pkts)
 goto out;

-xskq_cons_release_n(xs->tx, max_entries);
+nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts);
+if (!nb_pkts) {
+xs->tx->queue_empty_descs++;
+goto out;
+}
+
 __xskq_cons_release(xs->tx);
+xskq_prod_write_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
 xs->sk.sk_write_space(&xs->sk);

 out:
@@ -954,8 +954,8 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 goto out_unlock;
 }

-err = xp_assign_dev_shared(xs->pool, umem_xs->umem,
-dev, qid);
+err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
+qid);
 if (err) {
 xp_destroy(xs->pool);
 xs->pool = NULL;
@@ -212,17 +212,18 @@ err_unreg_pool:
 return err;
 }

-int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
+int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
 struct net_device *dev, u16 queue_id)
 {
 u16 flags;
+struct xdp_umem *umem = umem_xs->umem;

 /* One fill and completion ring required for each queue id. */
 if (!pool->fq || !pool->cq)
 return -EINVAL;

 flags = umem->zc ? XDP_ZEROCOPY : XDP_COPY;
-if (pool->uses_need_wakeup)
+if (umem_xs->pool->uses_need_wakeup)
 flags |= XDP_USE_NEED_WAKEUP;

 return xp_assign_dev(pool, dev, queue_id, flags);
@@ -205,6 +205,11 @@ static inline bool xskq_cons_read_desc(struct xsk_queue *q,
 return false;
 }

+static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt)
+{
+q->cached_cons += cnt;
+}
+
 static inline u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,
 u32 max)
 {
@@ -226,6 +231,8 @@ static inline u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff
 cached_cons++;
 }

+/* Release valid plus any invalid entries */
+xskq_cons_release_n(q, cached_cons - q->cached_cons);
 return nb_entries;
 }

@@ -291,11 +298,6 @@ static inline void xskq_cons_release(struct xsk_queue *q)
 q->cached_cons++;
 }

-static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt)
-{
-q->cached_cons += cnt;
-}
-
 static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
 {
 /* No barriers needed since data is not accessed */
@@ -350,21 +352,17 @@ static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
 return 0;
 }

-static inline u32 xskq_prod_reserve_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
-u32 max)
+static inline void xskq_prod_write_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
+u32 nb_entries)
 {
 struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
-u32 nb_entries, i, cached_prod;
+u32 i, cached_prod;

-nb_entries = xskq_prod_nb_free(q, max);
-
 /* A, matches D */
 cached_prod = q->cached_prod;
 for (i = 0; i < nb_entries; i++)
 ring->desc[cached_prod++ & q->ring_mask] = descs[i].addr;
 q->cached_prod = cached_prod;
-
-return nb_entries;
 }

 static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
@@ -25,7 +25,8 @@
 #include "xfrm_inout.h"

 struct xfrm_trans_tasklet {
-struct tasklet_struct tasklet;
+struct work_struct work;
+spinlock_t queue_lock;
 struct sk_buff_head queue;
 };

@@ -763,18 +764,22 @@ int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
 }
 EXPORT_SYMBOL(xfrm_input_resume);

-static void xfrm_trans_reinject(struct tasklet_struct *t)
+static void xfrm_trans_reinject(struct work_struct *work)
 {
-struct xfrm_trans_tasklet *trans = from_tasklet(trans, t, tasklet);
+struct xfrm_trans_tasklet *trans = container_of(work, struct xfrm_trans_tasklet, work);
 struct sk_buff_head queue;
 struct sk_buff *skb;

 __skb_queue_head_init(&queue);
+spin_lock_bh(&trans->queue_lock);
 skb_queue_splice_init(&trans->queue, &queue);
+spin_unlock_bh(&trans->queue_lock);

+local_bh_disable();
 while ((skb = __skb_dequeue(&queue)))
 XFRM_TRANS_SKB_CB(skb)->finish(XFRM_TRANS_SKB_CB(skb)->net,
 NULL, skb);
+local_bh_enable();
 }

 int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
@@ -792,8 +797,10 @@ int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,

 XFRM_TRANS_SKB_CB(skb)->finish = finish;
 XFRM_TRANS_SKB_CB(skb)->net = net;
+spin_lock_bh(&trans->queue_lock);
 __skb_queue_tail(&trans->queue, skb);
-tasklet_schedule(&trans->tasklet);
+spin_unlock_bh(&trans->queue_lock);
+schedule_work(&trans->work);
 return 0;
 }
 EXPORT_SYMBOL(xfrm_trans_queue_net);
@@ -820,7 +827,8 @@ void __init xfrm_input_init(void)
 struct xfrm_trans_tasklet *trans;

 trans = &per_cpu(xfrm_trans_tasklet, i);
+spin_lock_init(&trans->queue_lock);
 __skb_queue_head_init(&trans->queue);
-tasklet_setup(&trans->tasklet, xfrm_trans_reinject);
+INIT_WORK(&trans->work, xfrm_trans_reinject);
 }
 }
@@ -203,6 +203,7 @@ static void ipcomp_free_scratches(void)
 vfree(*per_cpu_ptr(scratches, i));

 free_percpu(scratches);
+ipcomp_scratches = NULL;
 }

 static void * __percpu *ipcomp_alloc_scratches(void)
@@ -136,8 +136,8 @@ static int show_bpf_prog(int id, enum bpf_attach_type attach_type,
 jsonw_string_field(json_wtr, "attach_type", attach_type_str);
 else
 jsonw_uint_field(json_wtr, "attach_type", attach_type);
-jsonw_string_field(json_wtr, "attach_flags",
-attach_flags_str);
+if (!(query_flags & BPF_F_QUERY_EFFECTIVE))
+jsonw_string_field(json_wtr, "attach_flags", attach_flags_str);
 jsonw_string_field(json_wtr, "name", prog_name);
 if (attach_btf_name)
 jsonw_string_field(json_wtr, "attach_btf_name", attach_btf_name);
@@ -150,7 +150,10 @@ static int show_bpf_prog(int id, enum bpf_attach_type attach_type,
 printf("%-15s", attach_type_str);
 else
 printf("type %-10u", attach_type);
-printf(" %-15s %-15s", attach_flags_str, prog_name);
+if (query_flags & BPF_F_QUERY_EFFECTIVE)
+printf(" %-15s", prog_name);
+else
+printf(" %-15s %-15s", attach_flags_str, prog_name);
 if (attach_btf_name)
 printf(" %-15s", attach_btf_name);
 else if (info.attach_btf_id)
@@ -195,6 +198,32 @@ static int cgroup_has_attached_progs(int cgroup_fd)

 return no_prog ? 0 : 1;
 }
+
+static int show_effective_bpf_progs(int cgroup_fd, enum bpf_attach_type type,
+int level)
+{
+LIBBPF_OPTS(bpf_prog_query_opts, p);
+__u32 prog_ids[1024] = {0};
+__u32 iter;
+int ret;
+
+p.query_flags = query_flags;
+p.prog_cnt = ARRAY_SIZE(prog_ids);
+p.prog_ids = prog_ids;
+
+ret = bpf_prog_query_opts(cgroup_fd, type, &p);
+if (ret)
+return ret;
+
+if (p.prog_cnt == 0)
+return 0;
+
+for (iter = 0; iter < p.prog_cnt; iter++)
+show_bpf_prog(prog_ids[iter], type, NULL, level);
+
+return 0;
+}
+
 static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type,
 int level)
 {
@@ -245,6 +274,14 @@ static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type,
 return 0;
 }

+static int show_bpf_progs(int cgroup_fd, enum bpf_attach_type type,
+int level)
+{
+return query_flags & BPF_F_QUERY_EFFECTIVE ?
+show_effective_bpf_progs(cgroup_fd, type, level) :
+show_attached_bpf_progs(cgroup_fd, type, level);
+}
+
 static int do_show(int argc, char **argv)
 {
 enum bpf_attach_type type;
@@ -292,6 +329,8 @@ static int do_show(int argc, char **argv)

 if (json_output)
 jsonw_start_array(json_wtr);
+else if (query_flags & BPF_F_QUERY_EFFECTIVE)
+printf("%-8s %-15s %-15s\n", "ID", "AttachType", "Name");
 else
 printf("%-8s %-15s %-15s %-15s\n", "ID", "AttachType",
 "AttachFlags", "Name");
@@ -304,7 +343,7 @@ static int do_show(int argc, char **argv)
 * If we were able to get the show for at least one
 * attach type, let's return 0.
 */
-if (show_attached_bpf_progs(cgroup_fd, type, 0) == 0)
+if (show_bpf_progs(cgroup_fd, type, 0) == 0)
 ret = 0;
 }

@@ -362,7 +401,7 @@ static int do_show_tree_fn(const char *fpath, const struct stat *sb,

 btf_vmlinux = libbpf_find_kernel_btf();
 for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++)
-show_attached_bpf_progs(cgroup_fd, type, ftw->level);
+show_bpf_progs(cgroup_fd, type, ftw->level);

 if (errno == EINVAL)
 /* Last attach type does not support query.
@@ -436,6 +475,11 @@ static int do_show_tree(int argc, char **argv)

 if (json_output)
 jsonw_start_array(json_wtr);
+else if (query_flags & BPF_F_QUERY_EFFECTIVE)
+printf("%s\n"
+"%-8s %-15s %-15s\n",
+"CgroupPath",
+"ID", "AttachType", "Name");
 else
 printf("%s\n"
 "%-8s %-15s %-15s %-15s\n",
@@ -1259,7 +1259,7 @@ enum {

 /* Query effective (directly attached + inherited from ancestor cgroups)
 * programs that will be executed for events within a cgroup.
-* attach_flags with this flag are returned only for directly attached programs.
+* attach_flags with this flag are always returned 0.
 */
 #define BPF_F_QUERY_EFFECTIVE (1U << 0)

@@ -1458,7 +1458,10 @@ union bpf_attr {
 __u32 attach_flags;
 __aligned_u64 prog_ids;
 __u32 prog_cnt;
-__aligned_u64 prog_attach_flags; /* output: per-program attach_flags */
+/* output: per-program attach_flags.
+ * not allowed to be set during effective query.
+ */
+__aligned_u64 prog_attach_flags;
 } query;

 struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
@@ -71,10 +71,9 @@ void serial_test_cgroup_link(void)

 ping_and_check(cg_nr, 0);

-/* query the number of effective progs and attach flags in root cg */
+/* query the number of attached progs and attach flags in root cg */
 err = bpf_prog_query(cgs[0].fd, BPF_CGROUP_INET_EGRESS,
-BPF_F_QUERY_EFFECTIVE, &attach_flags, NULL,
-&prog_cnt);
+0, &attach_flags, NULL, &prog_cnt);
 CHECK_FAIL(err);
 CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI);
 if (CHECK(prog_cnt != 1, "effect_cnt", "exp %d, got %d\n", 1, prog_cnt))
@@ -85,17 +84,15 @@ void serial_test_cgroup_link(void)
 BPF_F_QUERY_EFFECTIVE, NULL, NULL,
 &prog_cnt);
 CHECK_FAIL(err);
-CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI);
 if (CHECK(prog_cnt != cg_nr, "effect_cnt", "exp %d, got %d\n",
 cg_nr, prog_cnt))
 goto cleanup;

 /* query the effective prog IDs in last cg */
 err = bpf_prog_query(cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS,
-BPF_F_QUERY_EFFECTIVE, &attach_flags,
-prog_ids, &prog_cnt);
+BPF_F_QUERY_EFFECTIVE, NULL, prog_ids,
+&prog_cnt);
 CHECK_FAIL(err);
-CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI);
 if (CHECK(prog_cnt != cg_nr, "effect_cnt", "exp %d, got %d\n",
 cg_nr, prog_cnt))
 goto cleanup;
@@ -1842,6 +1842,8 @@ static struct ifobject *ifobject_create(void)
 if (!ifobj->umem)
 goto out_umem;

+ifobj->ns_fd = -1;
+
 return ifobj;

 out_umem:
@@ -1853,6 +1855,8 @@ out_xsk_arr:

 static void ifobject_delete(struct ifobject *ifobj)
 {
+if (ifobj->ns_fd != -1)
+close(ifobj->ns_fd);
 free(ifobj->umem);
 free(ifobj->xsk_arr);
 free(ifobj);