net, xdp: Introduce xdp_init_buff utility routine
Introduce the xdp_init_buff utility routine to initialize the xdp_buff
fields that stay constant across NAPI iterations (e.g. the frame_sz or
rxq pointer), and rely on xdp_init_buff in all XDP-capable drivers.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Acked-by: Shay Agroskin <shayagr@amazon.com>
Acked-by: Martin Habets <habetsm.xilinx@gmail.com>
Acked-by: Camelia Groza <camelia.groza@nxp.com>
Acked-by: Marcin Wojtas <mw@semihalf.com>
Link: https://lore.kernel.org/bpf/7f8329b6da1434dc2b05a77f2e800b29628a8913.1608670965.git.lorenzo@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit 43b5169d83
parent ec24e11e08
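
For orientation before the per-driver hunks: the helper only covers the two xdp_buff fields that stay constant for a whole NAPI run, so a converted RX path calls it once before its packet loop and keeps filling the per-packet pointers as before. A minimal sketch of that pattern against a hypothetical driver (the example_* ring type, helper functions, and EXAMPLE_* constants are illustrative, not part of this commit):

	/* Hypothetical NAPI poll path: xdp_init_buff() sets the NAPI-constant
	 * fields (frame_sz, rxq) once, while the per-packet fields
	 * (data_hard_start, data, data_end, data_meta) are still filled
	 * for every received frame.
	 */
	static int example_clean_rx_irq(struct example_ring *rx_ring, int budget)
	{
		struct xdp_buff xdp;
		int rx_done = 0;

		/* Once per NAPI invocation, not once per packet. */
		xdp_init_buff(&xdp, EXAMPLE_FRAME_SZ, &rx_ring->xdp_rxq);

		while (rx_done < budget) {
			void *frame = example_next_rx_frame(rx_ring);

			if (!frame)
				break;

			xdp.data_hard_start = frame;
			xdp.data = frame + EXAMPLE_HEADROOM;
			xdp.data_end = xdp.data + example_rx_frame_len(rx_ring);
			xdp.data_meta = xdp.data;

			/* run the attached XDP program and act on the verdict */
			example_run_xdp(rx_ring, &xdp);
			rx_done++;
		}

		return rx_done;
	}
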
drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1634,8 +1634,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
 		  "%s qid %d\n", __func__, rx_ring->qid);
 	res_budget = budget;
-	xdp.rxq = &rx_ring->xdp_rxq;
-	xdp.frame_sz = ENA_PAGE_SIZE;
+	xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq);
 
 	do {
 		xdp_verdict = XDP_PASS;

drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -133,12 +133,12 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 	dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
 
 	txr = rxr->bnapi->tx_ring;
+	/* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
+	xdp_init_buff(&xdp, PAGE_SIZE, &rxr->xdp_rxq);
 	xdp.data_hard_start = *data_ptr - offset;
 	xdp.data = *data_ptr;
 	xdp_set_data_meta_invalid(&xdp);
 	xdp.data_end = *data_ptr + *len;
-	xdp.rxq = &rxr->xdp_rxq;
-	xdp.frame_sz = PAGE_SIZE; /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
 	orig_data = xdp.data;
 
 	rcu_read_lock();

drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -547,12 +547,12 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
 	cpu_addr = (u64)phys_to_virt(cpu_addr);
 	page = virt_to_page((void *)cpu_addr);
 
+	xdp_init_buff(&xdp, RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
+		      &rq->xdp_rxq);
 	xdp.data_hard_start = page_address(page);
 	xdp.data = (void *)cpu_addr;
 	xdp_set_data_meta_invalid(&xdp);
 	xdp.data_end = xdp.data + len;
-	xdp.rxq = &rq->xdp_rxq;
-	xdp.frame_sz = RCV_FRAG_LEN + XDP_PACKET_HEADROOM;
 	orig_data = xdp.data;
 
 	rcu_read_lock();

drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2532,12 +2532,12 @@ static u32 dpaa_run_xdp(struct dpaa_priv *priv, struct qm_fd *fd, void *vaddr,
 		return XDP_PASS;
 	}
 
+	xdp_init_buff(&xdp, DPAA_BP_RAW_SIZE - DPAA_TX_PRIV_DATA_SIZE,
+		      &dpaa_fq->xdp_rxq);
 	xdp.data = vaddr + fd_off;
 	xdp.data_meta = xdp.data;
 	xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
 	xdp.data_end = xdp.data + qm_fd_get_length(fd);
-	xdp.frame_sz = DPAA_BP_RAW_SIZE - DPAA_TX_PRIV_DATA_SIZE;
-	xdp.rxq = &dpaa_fq->xdp_rxq;
 
 	/* We reserve a fixed headroom of 256 bytes under the erratum and we
 	 * offer it all to XDP programs to use. If no room is left for the

drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -358,14 +358,14 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
 	if (!xdp_prog)
 		goto out;
 
+	xdp_init_buff(&xdp,
+		      DPAA2_ETH_RX_BUF_RAW_SIZE -
+		      (dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM),
+		      &ch->xdp_rxq);
 	xdp.data = vaddr + dpaa2_fd_get_offset(fd);
 	xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
 	xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
 	xdp_set_data_meta_invalid(&xdp);
-	xdp.rxq = &ch->xdp_rxq;
-
-	xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE -
-		       (dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM);
 
 	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
 

drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2344,7 +2344,7 @@ static void i40e_inc_ntc(struct i40e_ring *rx_ring)
 **/
 static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 {
-	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
 	struct sk_buff *skb = rx_ring->skb;
 	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
 	unsigned int xdp_xmit = 0;
@@ -2352,9 +2352,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 	struct xdp_buff xdp;
 
 #if (PAGE_SIZE < 8192)
-	xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
+	frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
 #endif
-	xdp.rxq = &rx_ring->xdp_rxq;
+	xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
 
 	while (likely(total_rx_packets < (unsigned int)budget)) {
 		struct i40e_rx_buffer *rx_buffer;

drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1089,18 +1089,18 @@ ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
  */
 int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
 {
-	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
+	unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
 	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
 	unsigned int xdp_res, xdp_xmit = 0;
 	struct bpf_prog *xdp_prog = NULL;
 	struct xdp_buff xdp;
 	bool failure;
 
-	xdp.rxq = &rx_ring->xdp_rxq;
 	/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
 #if (PAGE_SIZE < 8192)
-	xdp.frame_sz = ice_rx_frame_truesize(rx_ring, 0);
+	frame_sz = ice_rx_frame_truesize(rx_ring, 0);
 #endif
+	xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
 
 	/* start the loop to process Rx packets bounded by 'budget' */
 	while (likely(total_rx_pkts < (unsigned int)budget)) {

|
@ -8681,13 +8681,13 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
|
||||
u16 cleaned_count = igb_desc_unused(rx_ring);
|
||||
unsigned int xdp_xmit = 0;
|
||||
struct xdp_buff xdp;
|
||||
|
||||
xdp.rxq = &rx_ring->xdp_rxq;
|
||||
u32 frame_sz = 0;
|
||||
|
||||
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
|
||||
#if (PAGE_SIZE < 8192)
|
||||
xdp.frame_sz = igb_rx_frame_truesize(rx_ring, 0);
|
||||
frame_sz = igb_rx_frame_truesize(rx_ring, 0);
|
||||
#endif
|
||||
xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
|
||||
|
||||
while (likely(total_packets < budget)) {
|
||||
union e1000_adv_rx_desc *rx_desc;
|
||||
|
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2291,7 +2291,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 			       struct ixgbe_ring *rx_ring,
 			       const int budget)
 {
-	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
 	struct ixgbe_adapter *adapter = q_vector->adapter;
 #ifdef IXGBE_FCOE
 	int ddp_bytes;
@@ -2301,12 +2301,11 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 	unsigned int xdp_xmit = 0;
 	struct xdp_buff xdp;
 
-	xdp.rxq = &rx_ring->xdp_rxq;
-
 	/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
 #if (PAGE_SIZE < 8192)
-	xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, 0);
+	frame_sz = ixgbe_rx_frame_truesize(rx_ring, 0);
 #endif
+	xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
 
 	while (likely(total_rx_packets < budget)) {
 		union ixgbe_adv_rx_desc *rx_desc;

drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1121,19 +1121,18 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 				struct ixgbevf_ring *rx_ring,
 				int budget)
 {
-	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
 	struct ixgbevf_adapter *adapter = q_vector->adapter;
 	u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
 	struct sk_buff *skb = rx_ring->skb;
 	bool xdp_xmit = false;
 	struct xdp_buff xdp;
 
-	xdp.rxq = &rx_ring->xdp_rxq;
-
 	/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
 #if (PAGE_SIZE < 8192)
-	xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, 0);
+	frame_sz = ixgbevf_rx_frame_truesize(rx_ring, 0);
 #endif
+	xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
 
 	while (likely(total_rx_packets < budget)) {
 		struct ixgbevf_rx_buffer *rx_buffer;

drivers/net/ethernet/marvell/mvneta.c
@@ -2363,9 +2363,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 	u32 desc_status, frame_sz;
 	struct xdp_buff xdp_buf;
 
+	xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq);
 	xdp_buf.data_hard_start = NULL;
-	xdp_buf.frame_sz = PAGE_SIZE;
-	xdp_buf.rxq = &rxq->xdp_rxq;
 
 	sinfo.nr_frags = 0;
 

drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3563,16 +3563,18 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
 			frag_size = bm_pool->frag_size;
 
 		if (xdp_prog) {
+			struct xdp_rxq_info *xdp_rxq;
+
 			xdp.data_hard_start = data;
 			xdp.data = data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM;
 			xdp.data_end = xdp.data + rx_bytes;
-			xdp.frame_sz = PAGE_SIZE;
 
 			if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE)
-				xdp.rxq = &rxq->xdp_rxq_short;
+				xdp_rxq = &rxq->xdp_rxq_short;
 			else
-				xdp.rxq = &rxq->xdp_rxq_long;
+				xdp_rxq = &rxq->xdp_rxq_long;
 
+			xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq);
 			xdp_set_data_meta_invalid(&xdp);
 
 			ret = mvpp2_run_xdp(port, rxq, xdp_prog, &xdp, pp, &ps);

drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -682,8 +682,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 	/* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
 	rcu_read_lock();
 	xdp_prog = rcu_dereference(ring->xdp_prog);
-	xdp.rxq = &ring->xdp_rxq;
-	xdp.frame_sz = priv->frag_info[0].frag_stride;
+	xdp_init_buff(&xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq);
 	doorbell_pending = false;
 
 	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx

drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1126,12 +1126,11 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
 static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
 				u32 len, struct xdp_buff *xdp)
 {
+	xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
 	xdp->data_hard_start = va;
 	xdp->data = va + headroom;
 	xdp_set_data_meta_invalid(xdp);
 	xdp->data_end = xdp->data + len;
-	xdp->rxq = &rq->xdp_rxq;
-	xdp->frame_sz = rq->buff.frame0_sz;
 }
 
 static struct sk_buff *

drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1822,8 +1822,8 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
 	rcu_read_lock();
 	xdp_prog = READ_ONCE(dp->xdp_prog);
 	true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
-	xdp.frame_sz = PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM;
-	xdp.rxq = &rx_ring->xdp_rxq;
+	xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM,
+		      &rx_ring->xdp_rxq);
 	tx_ring = r_vec->xdp_ring;
 
 	while (pkts_polled < budget) {

drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1090,12 +1090,11 @@ static bool qede_rx_xdp(struct qede_dev *edev,
 	struct xdp_buff xdp;
 	enum xdp_action act;
 
+	xdp_init_buff(&xdp, rxq->rx_buf_seg_size, &rxq->xdp_rxq);
 	xdp.data_hard_start = page_address(bd->data);
 	xdp.data = xdp.data_hard_start + *data_offset;
 	xdp_set_data_meta_invalid(&xdp);
 	xdp.data_end = xdp.data + *len;
-	xdp.rxq = &rxq->xdp_rxq;
-	xdp.frame_sz = rxq->rx_buf_seg_size; /* PAGE_SIZE when XDP enabled */
 
 	/* Queues always have a full reset currently, so for the time
 	 * being until there's atomic program replace just mark read

drivers/net/ethernet/sfc/rx.c
@@ -293,14 +293,13 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
 	memcpy(rx_prefix, *ehp - efx->rx_prefix_size,
 	       efx->rx_prefix_size);
 
+	xdp_init_buff(&xdp, efx->rx_page_buf_step, &rx_queue->xdp_rxq_info);
 	xdp.data = *ehp;
 	xdp.data_hard_start = xdp.data - EFX_XDP_HEADROOM;
 
 	/* No support yet for XDP metadata */
 	xdp_set_data_meta_invalid(&xdp);
 	xdp.data_end = xdp.data + rx_buf->len;
-	xdp.rxq = &rx_queue->xdp_rxq_info;
-	xdp.frame_sz = efx->rx_page_buf_step;
 
 	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
 	rcu_read_unlock();

drivers/net/ethernet/socionext/netsec.c
@@ -956,8 +956,7 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
 	u32 xdp_act = 0;
 	int done = 0;
 
-	xdp.rxq = &dring->xdp_rxq;
-	xdp.frame_sz = PAGE_SIZE;
+	xdp_init_buff(&xdp, PAGE_SIZE, &dring->xdp_rxq);
 
 	rcu_read_lock();
 	xdp_prog = READ_ONCE(priv->xdp_prog);

drivers/net/ethernet/ti/cpsw.c
@@ -392,6 +392,8 @@ static void cpsw_rx_handler(void *token, int len, int status)
 	}
 
 	if (priv->xdp_prog) {
+		xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
+
 		if (status & CPDMA_RX_VLAN_ENCAP) {
 			xdp.data = pa + CPSW_HEADROOM +
 				   CPSW_RX_VLAN_ENCAP_HDR_SIZE;
@@ -405,8 +407,6 @@ static void cpsw_rx_handler(void *token, int len, int status)
 		xdp_set_data_meta_invalid(&xdp);
 
 		xdp.data_hard_start = pa;
-		xdp.rxq = &priv->xdp_rxq[ch];
-		xdp.frame_sz = PAGE_SIZE;
 
 		port = priv->emac_port + cpsw->data.dual_emac;
 		ret = cpsw_run_xdp(priv, ch, &xdp, page, port);

drivers/net/ethernet/ti/cpsw_new.c
@@ -335,6 +335,8 @@ static void cpsw_rx_handler(void *token, int len, int status)
 	}
 
 	if (priv->xdp_prog) {
+		xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
+
 		if (status & CPDMA_RX_VLAN_ENCAP) {
 			xdp.data = pa + CPSW_HEADROOM +
 				   CPSW_RX_VLAN_ENCAP_HDR_SIZE;
@@ -348,8 +350,6 @@ static void cpsw_rx_handler(void *token, int len, int status)
 		xdp_set_data_meta_invalid(&xdp);
 
 		xdp.data_hard_start = pa;
-		xdp.rxq = &priv->xdp_rxq[ch];
-		xdp.frame_sz = PAGE_SIZE;
 
 		ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port);
 		if (ret != CPSW_XDP_PASS)

drivers/net/hyperv/netvsc_bpf.c
@@ -44,12 +44,11 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
 		goto out;
 	}
 
+	xdp_init_buff(xdp, PAGE_SIZE, &nvchan->xdp_rxq);
 	xdp->data_hard_start = page_address(page);
 	xdp->data = xdp->data_hard_start + NETVSC_XDP_HDRM;
 	xdp_set_data_meta_invalid(xdp);
 	xdp->data_end = xdp->data + len;
-	xdp->rxq = &nvchan->xdp_rxq;
-	xdp->frame_sz = PAGE_SIZE;
 
 	memcpy(xdp->data, data, len);
 

drivers/net/tun.c
@@ -1599,12 +1599,11 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 		struct xdp_buff xdp;
 		u32 act;
 
+		xdp_init_buff(&xdp, buflen, &tfile->xdp_rxq);
 		xdp.data_hard_start = buf;
 		xdp.data = buf + pad;
 		xdp_set_data_meta_invalid(&xdp);
 		xdp.data_end = xdp.data + len;
-		xdp.rxq = &tfile->xdp_rxq;
-		xdp.frame_sz = buflen;
 
 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 		if (act == XDP_REDIRECT || act == XDP_TX) {
@@ -2342,9 +2341,9 @@ static int tun_xdp_one(struct tun_struct *tun,
 			skb_xdp = true;
 			goto build;
 		}
+
+		xdp_init_buff(xdp, buflen, &tfile->xdp_rxq);
 		xdp_set_data_meta_invalid(xdp);
-		xdp->rxq = &tfile->xdp_rxq;
-		xdp->frame_sz = buflen;
 
 		act = bpf_prog_run_xdp(xdp_prog, xdp);
 		err = tun_xdp_act(tun, xdp_prog, xdp, act);

drivers/net/veth.c
@@ -654,7 +654,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
 					struct veth_xdp_tx_bq *bq,
 					struct veth_stats *stats)
 {
-	u32 pktlen, headroom, act, metalen;
+	u32 pktlen, headroom, act, metalen, frame_sz;
 	void *orig_data, *orig_data_end;
 	struct bpf_prog *xdp_prog;
 	int mac_len, delta, off;
@@ -714,11 +714,11 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
 	xdp.data = skb_mac_header(skb);
 	xdp.data_end = xdp.data + pktlen;
 	xdp.data_meta = xdp.data;
-	xdp.rxq = &rq->xdp_rxq;
 
 	/* SKB "head" area always have tailroom for skb_shared_info */
-	xdp.frame_sz = (void *)skb_end_pointer(skb) - xdp.data_hard_start;
-	xdp.frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	frame_sz = (void *)skb_end_pointer(skb) - xdp.data_hard_start;
+	frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	xdp_init_buff(&xdp, frame_sz, &rq->xdp_rxq);
 
 	orig_data = xdp.data;
 	orig_data_end = xdp.data_end;

drivers/net/virtio_net.c
@@ -689,12 +689,11 @@ static struct sk_buff *receive_small(struct net_device *dev,
 			page = xdp_page;
 		}
 
+		xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
 		xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
 		xdp.data = xdp.data_hard_start + xdp_headroom;
 		xdp.data_end = xdp.data + len;
 		xdp.data_meta = xdp.data;
-		xdp.rxq = &rq->xdp_rxq;
-		xdp.frame_sz = buflen;
 		orig_data = xdp.data;
 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 		stats->xdp_packets++;
@@ -859,12 +858,11 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 		 * the descriptor on if we get an XDP_TX return code.
 		 */
 		data = page_address(xdp_page) + offset;
+		xdp_init_buff(&xdp, frame_sz - vi->hdr_len, &rq->xdp_rxq);
 		xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
 		xdp.data = data + vi->hdr_len;
 		xdp.data_end = xdp.data + (len - vi->hdr_len);
 		xdp.data_meta = xdp.data;
-		xdp.rxq = &rq->xdp_rxq;
-		xdp.frame_sz = frame_sz - vi->hdr_len;
 
 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 		stats->xdp_packets++;

drivers/net/xen-netfront.c
@@ -864,12 +864,12 @@ static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
 	u32 act;
 	int err;
 
+	xdp_init_buff(xdp, XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
+		      &queue->xdp_rxq);
 	xdp->data_hard_start = page_address(pdata);
 	xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
 	xdp_set_data_meta_invalid(xdp);
 	xdp->data_end = xdp->data + len;
-	xdp->rxq = &queue->xdp_rxq;
-	xdp->frame_sz = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
 
 	act = bpf_prog_run_xdp(prog, xdp);
 	switch (act) {

include/net/xdp.h
@@ -76,6 +76,13 @@ struct xdp_buff {
 	u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom*/
 };
 
+static __always_inline void
+xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq)
+{
+	xdp->frame_sz = frame_sz;
+	xdp->rxq = rxq;
+}
+
 /* Reserve memory area at end-of data area.
  *
  * This macro reserves tailroom in the XDP buffer by limiting the

net/bpf/test_run.c
@@ -640,10 +640,10 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 	xdp.data = data + headroom;
 	xdp.data_meta = xdp.data;
 	xdp.data_end = xdp.data + size;
-	xdp.frame_sz = headroom + max_data_sz + tailroom;
 
 	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
-	xdp.rxq = &rxqueue->xdp_rxq;
+	xdp_init_buff(&xdp, headroom + max_data_sz + tailroom,
+		      &rxqueue->xdp_rxq);
 	bpf_prog_change_xdp(NULL, prog);
 	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
 	if (ret)

net/core/dev.c
@@ -4606,11 +4606,11 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
 	struct netdev_rx_queue *rxqueue;
 	void *orig_data, *orig_data_end;
 	u32 metalen, act = XDP_DROP;
+	u32 mac_len, frame_sz;
 	__be16 orig_eth_type;
 	struct ethhdr *eth;
 	bool orig_bcast;
 	int hlen, off;
-	u32 mac_len;
 
 	/* Reinjected packets coming from act_mirred or similar should
 	 * not get XDP generic processing.
@@ -4649,8 +4649,8 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
 	xdp->data_hard_start = skb->data - skb_headroom(skb);
 
 	/* SKB "head" area always have tailroom for skb_shared_info */
-	xdp->frame_sz = (void *)skb_end_pointer(skb) - xdp->data_hard_start;
-	xdp->frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	frame_sz = (void *)skb_end_pointer(skb) - xdp->data_hard_start;
+	frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
 	orig_data_end = xdp->data_end;
 	orig_data = xdp->data;
@@ -4659,7 +4659,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
 	orig_eth_type = eth->h_proto;
 
 	rxqueue = netif_get_rxqueue(skb);
-	xdp->rxq = &rxqueue->xdp_rxq;
+	xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
 
 	act = bpf_prog_run_xdp(xdp_prog, xdp);
 