bpf: devmap introduce dev_map_enqueue

Functionality is the same, but the ndo_xdp_xmit call is now invoked from
inside the devmap.c code: the redirect path hands the xdp_buff to the new
dev_map_enqueue() helper, which performs the convert_to_xdp_frame() and
ndo_xdp_xmit() steps that previously lived in net/core/filter.c.

V2: Fix compile issue reported by kbuild test robot <lkp@intel.com>

V5: Cleanups requested by Daniel
 - Newlines before func definition
 - Use BUILD_BUG_ON checks
 - Remove unnecessary return value store in dev_map_enqueue

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Author:    Jesper Dangaard Brouer, 2018-05-24 16:45:46 +02:00
Committer: Alexei Starovoitov
Commit:    67f29e07e1 (parent f80acbd233)
4 changed files with 51 additions and 23 deletions

diff --git a/include/linux/bpf.h b/include/linux/bpf.h

@@ -487,14 +487,16 @@ int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
 
 /* Map specifics */
-struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
+struct xdp_buff;
+
+struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
 void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
 void __dev_map_flush(struct bpf_map *map);
+int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp);
 
 struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
 void __cpu_map_insert_ctx(struct bpf_map *map, u32 index);
 void __cpu_map_flush(struct bpf_map *map);
-struct xdp_buff;
 int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
 		    struct net_device *dev_rx);
 
@@ -573,6 +575,15 @@ static inline void __dev_map_flush(struct bpf_map *map)
 {
 }
 
+struct xdp_buff;
+struct bpf_dtab_netdev;
+
+static inline
+int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp)
+{
+	return 0;
+}
+
 static inline
 struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
 {
@@ -587,7 +598,6 @@ static inline void __cpu_map_flush(struct bpf_map *map)
 {
 }
 
-struct xdp_buff;
 static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
 				  struct xdp_buff *xdp,
 				  struct net_device *dev_rx)

diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h

@@ -138,11 +138,18 @@ DEFINE_EVENT_PRINT(xdp_redirect_template, xdp_redirect_map_err,
 		  __entry->map_id, __entry->map_index)
 );
 
+#ifndef __DEVMAP_OBJ_TYPE
+#define __DEVMAP_OBJ_TYPE
+struct _bpf_dtab_netdev {
+	struct net_device *dev;
+};
+#endif /* __DEVMAP_OBJ_TYPE */
+
 #define devmap_ifindex(fwd, map)				\
 	(!fwd ? 0 :						\
 	 (!map ? 0 :						\
 	  ((map->map_type == BPF_MAP_TYPE_DEVMAP) ?		\
-	   ((struct net_device *)fwd)->ifindex : 0)))
+	   ((struct _bpf_dtab_netdev *)fwd)->dev->ifindex : 0)))
 
 #define _trace_xdp_redirect_map(dev, xdp, fwd, map, idx)	\
 	 trace_xdp_redirect_map(dev, xdp, devmap_ifindex(fwd, map),	\
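
The shadow struct exists because the real struct bpf_dtab_netdev is private to
kernel/bpf/devmap.c; the tracepoint only needs the leading dev member, so it
mirrors just that and casts through the mirror. The BUILD_BUG_ON added to
dev_map_init() (devmap.c hunk below) keeps the two layouts in sync. A minimal
userspace sketch of the same pattern, with hypothetical names (real_obj and
_shadow_obj are illustrative, not from the patch):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct real_obj {			/* private to one .c file */
	const char *name;		/* must stay the first member */
	int refcount;
};

struct _shadow_obj {			/* public mirror of the leading member */
	const char *name;
};

static const char *obj_name(void *handle)
{
	/* outsiders only ever cast through the shadow type */
	return ((struct _shadow_obj *)handle)->name;
}

int main(void)
{
	/* same idea as the BUILD_BUG_ON(offsetof(...) != offsetof(...)) check */
	static_assert(offsetof(struct real_obj, name) ==
		      offsetof(struct _shadow_obj, name), "shadow out of sync");

	struct real_obj o = { .name = "eth0", .refcount = 1 };
	printf("%s\n", obj_name(&o));
	return 0;
}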

diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c

@@ -48,13 +48,15 @@
  * calls will fail at this point.
  */
 #include <linux/bpf.h>
+#include <net/xdp.h>
 #include <linux/filter.h>
+#include <trace/events/xdp.h>
 
 #define DEV_CREATE_FLAG_MASK \
 	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
 
 struct bpf_dtab_netdev {
-	struct net_device *dev;
+	struct net_device *dev; /* must be first member, due to tracepoint */
 	struct bpf_dtab *dtab;
 	unsigned int bit;
 	struct rcu_head rcu;
@@ -240,21 +242,38 @@ void __dev_map_flush(struct bpf_map *map)
  * update happens in parallel here a dev_put wont happen until after reading the
  * ifindex.
  */
-struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
+struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
 {
 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
-	struct bpf_dtab_netdev *dev;
+	struct bpf_dtab_netdev *obj;
 
 	if (key >= map->max_entries)
 		return NULL;
 
-	dev = READ_ONCE(dtab->netdev_map[key]);
-	return dev ? dev->dev : NULL;
+	obj = READ_ONCE(dtab->netdev_map[key]);
+	return obj;
+}
+
+int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp)
+{
+	struct net_device *dev = dst->dev;
+	struct xdp_frame *xdpf;
+
+	if (!dev->netdev_ops->ndo_xdp_xmit)
+		return -EOPNOTSUPP;
+
+	xdpf = convert_to_xdp_frame(xdp);
+	if (unlikely(!xdpf))
+		return -EOVERFLOW;
+
+	/* TODO: implement a bulking/enqueue step later */
+	return dev->netdev_ops->ndo_xdp_xmit(dev, xdpf);
 }
 
 static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
 {
-	struct net_device *dev = __dev_map_lookup_elem(map, *(u32 *)key);
+	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
+	struct net_device *dev = obj ? obj->dev : NULL;
 
 	return dev ? &dev->ifindex : NULL;
 }
@@ -405,6 +424,9 @@ static struct notifier_block dev_map_notifier = {
 
 static int __init dev_map_init(void)
 {
+	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
+	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
+		     offsetof(struct _bpf_dtab_netdev, dev));
 	register_netdevice_notifier(&dev_map_notifier);
 	return 0;
 }

diff --git a/net/core/filter.c b/net/core/filter.c

@@ -3065,20 +3065,9 @@ static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
 
 	switch (map->map_type) {
 	case BPF_MAP_TYPE_DEVMAP: {
-		struct net_device *dev = fwd;
-		struct xdp_frame *xdpf;
+		struct bpf_dtab_netdev *dst = fwd;
 
-		if (!dev->netdev_ops->ndo_xdp_xmit)
-			return -EOPNOTSUPP;
-
-		xdpf = convert_to_xdp_frame(xdp);
-		if (unlikely(!xdpf))
-			return -EOVERFLOW;
-
-		/* TODO: move to inside map code instead, for bulk support
-		 * err = dev_map_enqueue(dev, xdp);
-		 */
-		err = dev->netdev_ops->ndo_xdp_xmit(dev, xdpf);
+		err = dev_map_enqueue(dst, xdp);
 		if (err)
 			return err;
 		__dev_map_insert_ctx(map, index);
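
For context, this rewritten path is what runs when an XDP program returns
XDP_REDIRECT into a DEVMAP: bpf_redirect_map() records the map and index, and
the kernel later reaches __bpf_tx_xdp_map(), which now calls dev_map_enqueue().
A minimal sketch of such a program, using present-day libbpf map syntax that
postdates this patch (the tx_port map name and key 0 are illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* DEVMAP: values are ifindexes; userspace populates slot 0 with the
 * egress device before attaching the program.
 */
struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 4);
	__type(key, __u32);
	__type(value, __u32);
} tx_port SEC(".maps");

SEC("xdp")
int redirect_prog(struct xdp_md *ctx)
{
	/* on success the kernel ends up in dev_map_enqueue() for slot 0 */
	return bpf_redirect_map(&tx_port, 0, 0);
}

char _license[] SEC("license") = "GPL";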