linux/include/net/xdp.h
Jesper Dangaard Brouer 5ab073ffd3 xdp: introduce xdp_return_frame API and use in cpumap
Introduce an xdp_return_frame API, and convert cpumap over as
the first user, given it has a queued XDP frame structure to leverage.

V3: Cleanup and remove C99 style comments, pointed out by Alex Duyck.
V6: Remove comment that id will be added later (Req by Alex Duyck)
V8: Rename enum mem_type to xdp_mem_type (found by kbuild test robot)

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-04-17 10:50:27 -04:00

/* include/net/xdp.h
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 * Released under terms in GPL version 2. See COPYING.
 */
#ifndef __LINUX_NET_XDP_H__
#define __LINUX_NET_XDP_H__

/**
 * DOC: XDP RX-queue information
 *
 * The XDP RX-queue info (xdp_rxq_info) is associated with the driver
 * level RX-ring queues. It is information that is specific to how
 * the driver has configured a given RX-ring queue.
 *
 * Each xdp_buff frame received in the driver carries a (pointer)
 * reference to this xdp_rxq_info structure. This provides the XDP
 * data-path read-access to RX-info for both kernel and bpf-side
 * (limited subset).
 *
 * For now, direct access is only safe while running in NAPI/softirq
 * context. Contents are read-mostly and must not be updated during
 * driver NAPI/softirq poll.
 *
 * The driver usage API is a register and unregister API.
 *
 * The struct is not directly tied to the XDP prog. A new XDP prog
 * can be attached as long as it doesn't change the underlying
 * RX-ring. If the RX-ring does change significantly, the NIC driver
 * naturally needs to stop the RX-ring before purging and reallocating
 * memory. In that process the driver MUST call unregister (which
 * also applies to driver shutdown and unload). The register API is
 * also mandatory during RX-ring setup.
 */

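/*
 * Illustrative sketch (not part of the original header), assuming a
 * driver with a per-ring xdp_rxq_info: how the register/unregister API
 * described above might be wired into RX-ring setup and teardown. The
 * example_* names are hypothetical; only the xdp_rxq_info_* calls and
 * MEM_TYPE_PAGE_SHARED come from this header, and passing a NULL
 * allocator for the page-based memory models is an assumption here.
 */
static inline int example_rx_ring_setup_xdp(struct xdp_rxq_info *xdp_rxq,
                                            struct net_device *dev,
                                            u32 queue_index)
{
        int err;

        /* Mandatory during RX-ring setup, before frames are delivered */
        err = xdp_rxq_info_reg(xdp_rxq, dev, queue_index);
        if (err)
                return err;

        /* Record how RX-ring memory is returned; split-page model here */
        return xdp_rxq_info_reg_mem_model(xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL);
}

static inline void example_rx_ring_teardown_xdp(struct xdp_rxq_info *xdp_rxq)
{
        /* MUST run before purging/reallocating RX-ring memory, and on
         * driver shutdown and unload.
         */
        xdp_rxq_info_unreg(xdp_rxq);
}
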
enum xdp_mem_type {
        MEM_TYPE_PAGE_SHARED = 0, /* Split-page refcnt based model */
        MEM_TYPE_PAGE_ORDER0,     /* Orig XDP full page model */
        MEM_TYPE_MAX,
};

struct xdp_mem_info {
        u32 type; /* enum xdp_mem_type, but known size type */
};

struct xdp_rxq_info {
        struct net_device *dev;
        u32 queue_index;
        u32 reg_state;
        struct xdp_mem_info mem;
} ____cacheline_aligned; /* perf critical, avoid false-sharing */

static inline
void xdp_return_frame(void *data, struct xdp_mem_info *mem)
{
        if (mem->type == MEM_TYPE_PAGE_SHARED)
                page_frag_free(data);

        if (mem->type == MEM_TYPE_PAGE_ORDER0) {
                struct page *page = virt_to_page(data); /* Assumes order0 page */

                put_page(page);
        }
}

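/*
 * Illustrative sketch (not part of the original header): per the commit
 * message, a consumer that queues XDP frames for later processing, as
 * cpumap does, can stash the data pointer together with a copy of the
 * RX-queue's xdp_mem_info and release the memory via xdp_return_frame().
 * The example_* struct and function names below are hypothetical.
 */
struct example_queued_frame {
        void *data;              /* start of the queued XDP frame's memory */
        struct xdp_mem_info mem; /* copied from the frame's xdp_rxq_info */
};

static inline void example_drop_queued_frame(struct example_queued_frame *qf)
{
        /* Return memory according to the RX-ring's registered mem type */
        xdp_return_frame(qf->data, &qf->mem);
}
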
int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
                     struct net_device *dev, u32 queue_index);
void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq);
void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq);
bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq);
int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
                               enum xdp_mem_type type, void *allocator);

#endif /* __LINUX_NET_XDP_H__ */