/* SPDX-License-Identifier: GPL-2.0-only */
/* include/net/xdp.h
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#ifndef __LINUX_NET_XDP_H__
#define __LINUX_NET_XDP_H__

#include <linux/bitfield.h>
#include <linux/filter.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h> /* skb_shared_info */

/**
 * DOC: XDP RX-queue information
 *
 * The XDP RX-queue info (xdp_rxq_info) is associated with the driver
 * level RX-ring queues. It is information that is specific to how
 * the driver has configured a given RX-ring queue.
 *
 * Each xdp_buff frame received in the driver carries a (pointer)
 * reference to this xdp_rxq_info structure. This provides the XDP
 * data-path read-access to RX-info for both kernel and bpf-side
 * (limited subset).
 *
 * For now, direct access is only safe while running in NAPI/softirq
 * context. Contents are read-mostly and must not be updated during
 * driver NAPI/softirq poll.
 *
 * The driver usage API is a register and unregister API.
 *
 * The struct is not directly tied to the XDP prog. A new XDP prog
 * can be attached as long as it doesn't change the underlying
 * RX-ring. If the RX-ring does change significantly, the NIC driver
 * naturally needs to stop the RX-ring before purging and reallocating
 * memory. In that process the driver MUST call unregister (which
 * also applies for driver shutdown and unload). The register API is
 * also mandatory during RX-ring setup.
 */

enum xdp_mem_type {
	MEM_TYPE_PAGE_SHARED = 0, /* Split-page refcnt based model */
	MEM_TYPE_PAGE_ORDER0,     /* Orig XDP full page model */
	MEM_TYPE_PAGE_POOL,
	MEM_TYPE_XSK_BUFF_POOL,
	MEM_TYPE_MAX,
};

/* XDP flags for ndo_xdp_xmit */
#define XDP_XMIT_FLUSH		(1U << 0)	/* doorbell signal consumer */
#define XDP_XMIT_FLAGS_MASK	XDP_XMIT_FLUSH

struct xdp_mem_info {
	u32 type; /* enum xdp_mem_type, but known size type */
	u32 id;
};

struct page_pool;

struct xdp_rxq_info {
	struct net_device *dev;
	u32 queue_index;
	u32 reg_state;
	struct xdp_mem_info mem;
	unsigned int napi_id;
	u32 frag_size;
} ____cacheline_aligned; /* perf critical, avoid false-sharing */

struct xdp_txq_info {
	struct net_device *dev;
};

enum xdp_buff_flags {
	XDP_FLAGS_HAS_FRAGS		= BIT(0), /* non-linear xdp buff */
	XDP_FLAGS_FRAGS_PF_MEMALLOC	= BIT(1), /* xdp paged memory is under
						   * pressure
						   */
};

struct xdp_buff {
	void *data;
	void *data_end;
	void *data_meta;
	void *data_hard_start;
	struct xdp_rxq_info *rxq;
	struct xdp_txq_info *txq;
	u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom */
	u32 flags; /* supported values defined in xdp_buff_flags */
};

static __always_inline bool xdp_buff_has_frags(struct xdp_buff *xdp)
{
	return !!(xdp->flags & XDP_FLAGS_HAS_FRAGS);
}

static __always_inline void xdp_buff_set_frags_flag(struct xdp_buff *xdp)
{
	xdp->flags |= XDP_FLAGS_HAS_FRAGS;
}

static __always_inline void xdp_buff_clear_frags_flag(struct xdp_buff *xdp)
{
	xdp->flags &= ~XDP_FLAGS_HAS_FRAGS;
}

static __always_inline bool xdp_buff_is_frag_pfmemalloc(struct xdp_buff *xdp)
{
	return !!(xdp->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC);
}

static __always_inline void xdp_buff_set_frag_pfmemalloc(struct xdp_buff *xdp)
{
	xdp->flags |= XDP_FLAGS_FRAGS_PF_MEMALLOC;
}

static __always_inline void
xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq)
{
	xdp->frame_sz = frame_sz;
	xdp->rxq = rxq;
	xdp->flags = 0;
}

static __always_inline void
xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start,
		 int headroom, int data_len, const bool meta_valid)
{
	unsigned char *data = hard_start + headroom;

	xdp->data_hard_start = hard_start;
	xdp->data = data;
	xdp->data_end = data + data_len;
	xdp->data_meta = meta_valid ? data : data + 1;
}
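
/* Illustrative sketch (not kernel API; mydrv_*, rxring and desc are
 * hypothetical names): a typical driver RX path calls xdp_init_buff()
 * once per NAPI poll to set the constant frame size and rxq binding,
 * then xdp_prepare_buff() per received packet:
 *
 *	struct xdp_buff xdp;
 *
 *	xdp_init_buff(&xdp, PAGE_SIZE, &rxring->xdp_rxq);
 *	while ((desc = mydrv_next_rx_desc(rxring))) {
 *		xdp_prepare_buff(&xdp, page_address(desc->page),
 *				 XDP_PACKET_HEADROOM, desc->pkt_len,
 *				 false);	// metadata not yet valid
 *		...
 *	}
 */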

/* Reserve memory area at end of the data area.
 *
 * This macro reserves tailroom in the XDP buffer by limiting the
 * XDP/BPF data access to data_hard_end. Notice same area (and size)
 * is used for XDP_PASS, when constructing the SKB via build_skb().
 */
#define xdp_data_hard_end(xdp)				\
	((xdp)->data_hard_start + (xdp)->frame_sz -	\
	 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

static inline struct skb_shared_info *
xdp_get_shared_info_from_buff(struct xdp_buff *xdp)
{
	return (struct skb_shared_info *)xdp_data_hard_end(xdp);
}

static __always_inline unsigned int xdp_get_buff_len(struct xdp_buff *xdp)
{
	unsigned int len = xdp->data_end - xdp->data;
	struct skb_shared_info *sinfo;

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	sinfo = xdp_get_shared_info_from_buff(xdp);
	len += sinfo->xdp_frags_size;
out:
	return len;
}

struct xdp_frame {
	void *data;
	u16 len;
	u16 headroom;
	u32 metasize; /* uses lower 8-bits */
	/* Lifetime of xdp_rxq_info is limited to NAPI/enqueue time,
	 * while mem info is valid on remote CPU.
	 */
	struct xdp_mem_info mem;
	struct net_device *dev_rx; /* used by cpumap */
	u32 frame_sz;
	u32 flags; /* supported values defined in xdp_buff_flags */
};

static __always_inline bool xdp_frame_has_frags(struct xdp_frame *frame)
{
	return !!(frame->flags & XDP_FLAGS_HAS_FRAGS);
}

static __always_inline bool xdp_frame_is_frag_pfmemalloc(struct xdp_frame *frame)
{
	return !!(frame->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC);
}

#define XDP_BULK_QUEUE_SIZE	16
struct xdp_frame_bulk {
	int count;
	void *xa;
	void *q[XDP_BULK_QUEUE_SIZE];
};

static __always_inline void xdp_frame_bulk_init(struct xdp_frame_bulk *bq)
{
	/* bq->count will be zero'ed when bq->xa gets updated */
	bq->xa = NULL;
}

static inline struct skb_shared_info *
xdp_get_shared_info_from_frame(struct xdp_frame *frame)
{
	void *data_hard_start = frame->data - frame->headroom - sizeof(*frame);

	return (struct skb_shared_info *)(data_hard_start + frame->frame_sz -
					  SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
}

struct xdp_cpumap_stats {
	unsigned int redirect;
	unsigned int pass;
	unsigned int drop;
};

/* Clear kernel pointers in xdp_frame */
static inline void xdp_scrub_frame(struct xdp_frame *frame)
{
	frame->data = NULL;
	frame->dev_rx = NULL;
}

static inline void
xdp_update_skb_shared_info(struct sk_buff *skb, u8 nr_frags,
			   unsigned int size, unsigned int truesize,
			   bool pfmemalloc)
{
	skb_shinfo(skb)->nr_frags = nr_frags;

	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
	skb->pfmemalloc |= pfmemalloc;
}

/* Avoids inlining WARN macro in fast-path */
void xdp_warn(const char *msg, const char *func, const int line);
#define XDP_WARN(msg) xdp_warn(msg, __func__, __LINE__)

struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp);
struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					   struct sk_buff *skb,
					   struct net_device *dev);
struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					 struct net_device *dev);
int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp);
struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf);

static inline
void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp)
{
	xdp->data_hard_start = frame->data - frame->headroom - sizeof(*frame);
	xdp->data = frame->data;
	xdp->data_end = frame->data + frame->len;
	xdp->data_meta = frame->data - frame->metasize;
	xdp->frame_sz = frame->frame_sz;
	xdp->flags = frame->flags;
}

static inline
int xdp_update_frame_from_buff(struct xdp_buff *xdp,
			       struct xdp_frame *xdp_frame)
{
	int metasize, headroom;

	/* Assure headroom is available for storing info */
	headroom = xdp->data - xdp->data_hard_start;
	metasize = xdp->data - xdp->data_meta;
	metasize = metasize > 0 ? metasize : 0;
	if (unlikely((headroom - metasize) < sizeof(*xdp_frame)))
		return -ENOSPC;

	/* Catch if driver didn't reserve tailroom for skb_shared_info */
	if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
		XDP_WARN("Driver BUG: missing reserved tailroom");
		return -ENOSPC;
	}

	xdp_frame->data = xdp->data;
	xdp_frame->len = xdp->data_end - xdp->data;
	xdp_frame->headroom = headroom - sizeof(*xdp_frame);
	xdp_frame->metasize = metasize;
	xdp_frame->frame_sz = xdp->frame_sz;
	xdp_frame->flags = xdp->flags;

	return 0;
}

/* Convert xdp_buff to xdp_frame */
static inline
struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp)
{
	struct xdp_frame *xdp_frame;

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
		return xdp_convert_zc_to_xdp_frame(xdp);

	/* Store info in top of packet */
	xdp_frame = xdp->data_hard_start;
	if (unlikely(xdp_update_frame_from_buff(xdp, xdp_frame) < 0))
		return NULL;

	/* rxq only valid until napi_schedule ends, convert to xdp_mem_info */
	xdp_frame->mem = xdp->rxq->mem;

	return xdp_frame;
}
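
/* Illustrative sketch (hypothetical driver code): on an XDP_TX verdict
 * the driver converts the xdp_buff in place; on failure (a driver bug,
 * e.g. missing reserved tailroom) the packet must simply be dropped:
 *
 *	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(&xdp);
 *
 *	if (unlikely(!xdpf))
 *		goto drop;
 *	mydrv_xmit_xdp_frame(txring, xdpf);
 */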

void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
		  struct xdp_buff *xdp);
void xdp_return_frame(struct xdp_frame *xdpf);
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf);
void xdp_return_buff(struct xdp_buff *xdp);
void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq);
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
			   struct xdp_frame_bulk *bq);
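
/* Illustrative sketch (hypothetical driver code): bulk-freeing frames
 * on a TX completion path amortizes the per-frame return cost. Frames
 * are batched per memory allocator in the on-stack bulk queue, which
 * must be flushed before leaving the completion handler:
 *
 *	struct xdp_frame_bulk bq;
 *
 *	xdp_frame_bulk_init(&bq);
 *	while ((xdpf = mydrv_next_completed_frame(txring)))
 *		xdp_return_frame_bulk(xdpf, &bq);
 *	xdp_flush_frame_bulk(&bq);
 */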

static __always_inline unsigned int xdp_get_frame_len(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;
	unsigned int len = xdpf->len;

	if (likely(!xdp_frame_has_frags(xdpf)))
		goto out;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	len += sinfo->xdp_frags_size;
out:
	return len;
}

int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		       struct net_device *dev, u32 queue_index,
		       unsigned int napi_id, u32 frag_size);
static inline int
xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		 struct net_device *dev, u32 queue_index,
		 unsigned int napi_id)
{
	return __xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id, 0);
}

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq);
void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq);
bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq);
int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator);
void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq);
int xdp_reg_mem_model(struct xdp_mem_info *mem,
		      enum xdp_mem_type type, void *allocator);
void xdp_unreg_mem_model(struct xdp_mem_info *mem);
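
/* Illustrative sketch (hypothetical driver code; the ring/netdev names
 * are made up) of the register and unregister lifecycle described in
 * the DOC section above. On RX-ring setup:
 *
 *	err = xdp_rxq_info_reg(&ring->xdp_rxq, netdev, ring->q_idx,
 *			       napi->napi_id);
 *	if (err)
 *		goto err_out;
 *	err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
 *					 MEM_TYPE_PAGE_POOL, ring->page_pool);
 *
 * On RX-ring teardown (and driver shutdown/unload) the driver MUST
 * unregister, which also unregisters the memory model:
 *
 *	xdp_rxq_info_unreg(&ring->xdp_rxq);
 */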

/* Drivers not supporting XDP metadata can use this helper, which
 * rejects any room expansion for metadata as a result.
 */
static __always_inline void
xdp_set_data_meta_invalid(struct xdp_buff *xdp)
{
	xdp->data_meta = xdp->data + 1;
}

static __always_inline bool
xdp_data_meta_unsupported(const struct xdp_buff *xdp)
{
	return unlikely(xdp->data_meta > xdp->data);
}

static inline bool xdp_metalen_invalid(unsigned long metalen)
{
	unsigned long meta_max;

	meta_max = type_max(typeof_member(struct skb_shared_info, meta_len));
	BUILD_BUG_ON(!__builtin_constant_p(meta_max));

	return !IS_ALIGNED(metalen, sizeof(u32)) || metalen > meta_max;
}

struct xdp_attachment_info {
	struct bpf_prog *prog;
	u32 flags;
};

struct netdev_bpf;
void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf);

#define DEV_MAP_BULK_SIZE XDP_BULK_QUEUE_SIZE

/* Define the relationship between xdp-rx-metadata kfunc and
 * various other entities:
 * - xdp_rx_metadata enum
 * - netdev netlink enum (Documentation/netlink/specs/netdev.yaml)
 * - kfunc name
 * - xdp_metadata_ops field
 */
#define XDP_METADATA_KFUNC_xxx	\
	XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_TIMESTAMP, \
			   NETDEV_XDP_RX_METADATA_TIMESTAMP, \
			   bpf_xdp_metadata_rx_timestamp, \
			   xmo_rx_timestamp) \
	XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_HASH, \
			   NETDEV_XDP_RX_METADATA_HASH, \
			   bpf_xdp_metadata_rx_hash, \
			   xmo_rx_hash) \
	XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_VLAN_TAG, \
			   NETDEV_XDP_RX_METADATA_VLAN_TAG, \
			   bpf_xdp_metadata_rx_vlan_tag, \
			   xmo_rx_vlan_tag) \

enum xdp_rx_metadata {
#define XDP_METADATA_KFUNC(name, _, __, ___) name,
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC
	MAX_XDP_METADATA_KFUNC,
};

enum xdp_rss_hash_type {
	/* First part: Individual bits for L3/L4 types */
	XDP_RSS_L3_IPV4		= BIT(0),
	XDP_RSS_L3_IPV6		= BIT(1),

	/* The fixed (L3) IPv4 and IPv6 headers can both be followed by
	 * variable/dynamic headers, IPv4 called Options and IPv6 called
	 * Extension Headers. HW RSS type can contain this info.
	 */
	XDP_RSS_L3_DYNHDR	= BIT(2),

	/* When RSS hash covers L4 then drivers MUST set XDP_RSS_L4 bit in
	 * addition to the protocol specific bit. This eases interaction with
	 * SKBs and avoids reserving a fixed mask for future L4 protocol bits.
	 */
	XDP_RSS_L4		= BIT(3), /* L4 based hash, proto can be unknown */
	XDP_RSS_L4_TCP		= BIT(4),
	XDP_RSS_L4_UDP		= BIT(5),
	XDP_RSS_L4_SCTP		= BIT(6),
	XDP_RSS_L4_IPSEC	= BIT(7), /* L4 based hash include IPSEC SPI */
	XDP_RSS_L4_ICMP		= BIT(8),

	/* Second part: RSS hash type combinations used for driver HW mapping */
	XDP_RSS_TYPE_NONE            = 0,
	XDP_RSS_TYPE_L2              = XDP_RSS_TYPE_NONE,

	XDP_RSS_TYPE_L3_IPV4         = XDP_RSS_L3_IPV4,
	XDP_RSS_TYPE_L3_IPV6         = XDP_RSS_L3_IPV6,
	XDP_RSS_TYPE_L3_IPV4_OPT     = XDP_RSS_L3_IPV4 | XDP_RSS_L3_DYNHDR,
	XDP_RSS_TYPE_L3_IPV6_EX      = XDP_RSS_L3_IPV6 | XDP_RSS_L3_DYNHDR,

	XDP_RSS_TYPE_L4_ANY          = XDP_RSS_L4,
	XDP_RSS_TYPE_L4_IPV4_TCP     = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_TCP,
	XDP_RSS_TYPE_L4_IPV4_UDP     = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_UDP,
	XDP_RSS_TYPE_L4_IPV4_SCTP    = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_SCTP,
	XDP_RSS_TYPE_L4_IPV4_IPSEC   = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_IPSEC,
	XDP_RSS_TYPE_L4_IPV4_ICMP    = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_ICMP,

	XDP_RSS_TYPE_L4_IPV6_TCP     = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_TCP,
	XDP_RSS_TYPE_L4_IPV6_UDP     = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_UDP,
	XDP_RSS_TYPE_L4_IPV6_SCTP    = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_SCTP,
	XDP_RSS_TYPE_L4_IPV6_IPSEC   = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_IPSEC,
	XDP_RSS_TYPE_L4_IPV6_ICMP    = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_ICMP,

	XDP_RSS_TYPE_L4_IPV6_TCP_EX  = XDP_RSS_TYPE_L4_IPV6_TCP  | XDP_RSS_L3_DYNHDR,
	XDP_RSS_TYPE_L4_IPV6_UDP_EX  = XDP_RSS_TYPE_L4_IPV6_UDP  | XDP_RSS_L3_DYNHDR,
	XDP_RSS_TYPE_L4_IPV6_SCTP_EX = XDP_RSS_TYPE_L4_IPV6_SCTP | XDP_RSS_L3_DYNHDR,
};

struct xdp_metadata_ops {
	int	(*xmo_rx_timestamp)(const struct xdp_md *ctx, u64 *timestamp);
	int	(*xmo_rx_hash)(const struct xdp_md *ctx, u32 *hash,
			       enum xdp_rss_hash_type *rss_type);
	int	(*xmo_rx_vlan_tag)(const struct xdp_md *ctx, __be16 *vlan_proto,
				   u16 *vlan_tci);
};
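
/* Illustrative sketch (hypothetical driver code; all mydrv_* names and
 * descriptor fields are made up): a driver exposes RX metadata to XDP
 * programs by implementing xdp_metadata_ops and assigning it in its
 * netdev setup path.  Callbacks return -ENODATA when the hardware did
 * not provide the requested metadata for this frame:
 *
 *	static int mydrv_xmo_rx_hash(const struct xdp_md *ctx, u32 *hash,
 *				     enum xdp_rss_hash_type *rss_type)
 *	{
 *		const struct mydrv_xdp_buff *mxbuf = (void *)ctx;
 *
 *		if (!mxbuf->rx_desc->hash_valid)
 *			return -ENODATA;
 *		*hash = le32_to_cpu(mxbuf->rx_desc->rss_hash);
 *		*rss_type = XDP_RSS_TYPE_L4_IPV4_TCP;
 *		return 0;
 *	}
 *
 *	static const struct xdp_metadata_ops mydrv_xdp_metadata_ops = {
 *		.xmo_rx_hash = mydrv_xmo_rx_hash,
 *	};
 *
 *	netdev->xdp_metadata_ops = &mydrv_xdp_metadata_ops;
 */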

#ifdef CONFIG_NET
u32 bpf_xdp_metadata_kfunc_id(int id);
bool bpf_dev_bound_kfunc_id(u32 btf_id);
void xdp_set_features_flag(struct net_device *dev, xdp_features_t val);
void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg);
void xdp_features_clear_redirect_target(struct net_device *dev);
#else
static inline u32 bpf_xdp_metadata_kfunc_id(int id) { return 0; }
static inline bool bpf_dev_bound_kfunc_id(u32 btf_id) { return false; }

static inline void
xdp_set_features_flag(struct net_device *dev, xdp_features_t val)
{
}

static inline void
xdp_features_set_redirect_target(struct net_device *dev, bool support_sg)
{
}

static inline void
xdp_features_clear_redirect_target(struct net_device *dev)
{
}
#endif

static inline void xdp_clear_features_flag(struct net_device *dev)
{
	xdp_set_features_flag(dev, 0);
}

static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
					    struct xdp_buff *xdp)
{
	/* Driver XDP hooks are invoked within a single NAPI poll cycle and thus
	 * under local_bh_disable(), which provides the needed RCU protection
	 * for accessing map entries.
	 */
	u32 act = __bpf_prog_run(prog, xdp, BPF_DISPATCHER_FUNC(xdp));

	if (static_branch_unlikely(&bpf_master_redirect_enabled_key)) {
		if (act == XDP_TX && netif_is_bond_slave(xdp->rxq->dev))
			act = xdp_master_redirect(xdp);
	}

	return act;
}
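
/* Illustrative sketch (hypothetical driver code; mydrv_* names are made
 * up) of running an XDP program inside a NAPI poll loop and acting on
 * the verdict:
 *
 *	act = bpf_prog_run_xdp(prog, &xdp);
 *	switch (act) {
 *	case XDP_PASS:
 *		break;		// build an skb and hand it to the stack
 *	case XDP_TX:
 *		mydrv_xmit_back(ring, &xdp);
 *		break;
 *	case XDP_REDIRECT:
 *		xdp_do_redirect(netdev, &xdp, prog);
 *		break;
 *	default:
 *		bpf_warn_invalid_xdp_action(netdev, prog, act);
 *		fallthrough;
 *	case XDP_ABORTED:
 *		trace_xdp_exception(netdev, prog, act);
 *		fallthrough;
 *	case XDP_DROP:
 *		mydrv_recycle(&xdp);
 *		break;
 *	}
 */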

#endif /* __LINUX_NET_XDP_H__ */