// SPDX-License-Identifier: GPL-2.0
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */
|
#include <linux/slab.h>

#include "xsk_queue.h"

void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props)
|
|
|
|
{
|
|
|
|
if (!q)
|
|
|
|
return;
|
|
|
|
|
|
|
|
q->umem_props = *umem_props;
|
|
|
|
}
|
|
|
|
|
|
|
|
static u32 xskq_umem_get_ring_size(struct xsk_queue *q)
|
|
|
|
{
|
|
|
|
return sizeof(struct xdp_umem_ring) + q->nentries * sizeof(u32);
|
|
|
|
}
|
|
|
|
|
|
|
|
static u32 xskq_rxtx_get_ring_size(struct xsk_queue *q)
|
|
|
|
{
|
2018-05-18 12:00:23 +00:00
|
|
|
return sizeof(struct xdp_ring) + q->nentries * sizeof(struct xdp_desc);
|
2018-05-02 11:01:25 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Allocate and initialize an xsk queue of nentries entries.
 *
 * @nentries: number of ring slots. NOTE(review): ring_mask assumes this
 *	is a power of two — presumably validated by the caller; confirm.
 * @umem_queue: true for a fill/completion (umem) ring sized in u32
 *	frame ids, false for an Rx/Tx ring sized in xdp_desc entries.
 *
 * Returns the new queue, or NULL on allocation failure. The caller owns
 * the result and releases it with xskq_destroy().
 */
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue)
{
	struct xsk_queue *q = kzalloc(sizeof(*q), GFP_KERNEL);
	size_t ring_size;
	gfp_t gfp_flags;

	if (!q)
		return NULL;

	q->nentries = nentries;
	q->ring_mask = nentries - 1;

	/* Zeroed pages, no OOM warning, compound pages allowed, and give
	 * up early instead of retrying hard under memory pressure.
	 */
	gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN |
		    __GFP_COMP | __GFP_NORETRY;

	if (umem_queue)
		ring_size = xskq_umem_get_ring_size(q);
	else
		ring_size = xskq_rxtx_get_ring_size(q);

	q->ring = (struct xdp_ring *)__get_free_pages(gfp_flags,
						      get_order(ring_size));
	if (!q->ring) {
		kfree(q);
		return NULL;
	}

	return q;
}
|
|
|
|
|
|
|
|
void xskq_destroy(struct xsk_queue *q)
|
|
|
|
{
|
|
|
|
if (!q)
|
|
|
|
return;
|
|
|
|
|
|
|
|
page_frag_free(q->ring);
|
|
|
|
kfree(q);
|
|
|
|
}
|