Merge branch 'bpf-flow-dissector-tests'

Stanislav Fomichev says:

====================
This patch series adds support for testing flow dissector BPF programs
by extending already existing BPF_PROG_TEST_RUN. The goal is to have
a packet as an input and `struct bpf_flow_key' as an output. That way
we can easily test flow dissector programs' behavior. I've also modified
existing test_progs.c test to do a simple flow dissector run as well.

* first patch introduces new __skb_flow_bpf_dissect to simplify
  sharing between __skb_flow_dissect and BPF_PROG_TEST_RUN
* second patch adds actual BPF_PROG_TEST_RUN support
* third patch adds example usage to the selftests

v3:
* rebased on top of latest bpf-next

v2:
* loop over 'kattr->test.repeat' inside of
  bpf_prog_test_run_flow_dissector, don't reuse
  bpf_test_run/bpf_test_run_one
====================

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
This commit is contained in:
Daniel Borkmann 2019-01-29 01:08:30 +01:00
commit 3d2af27a84
9 changed files with 284 additions and 78 deletions

View File

@ -404,6 +404,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr);
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr __user *uattr);
/* an array of programs to be executed under rcu_lock.
*

View File

@ -1221,6 +1221,11 @@ static inline int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
}
#endif
struct bpf_flow_keys;
bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
struct bpf_flow_keys *flow_keys);
bool __skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container,

View File

@ -240,3 +240,85 @@ out:
kfree(data);
return ret;
}
/*
 * bpf_prog_test_run_flow_dissector - BPF_PROG_TEST_RUN handler for
 * BPF_PROG_TYPE_FLOW_DISSECTOR programs.
 *
 * Wraps the user-supplied packet (kattr->test.data_in) in a freshly
 * built skb, runs @prog against it kattr->test.repeat times (at least
 * once), then copies the resulting struct bpf_flow_keys, the program's
 * last return value and the average per-run duration back to user
 * space via @uattr.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr __user *uattr)
{
u32 size = kattr->test.data_size_in;
u32 repeat = kattr->test.repeat;
struct bpf_flow_keys flow_keys;
u64 time_start, time_spent = 0;
struct bpf_skb_data_end *cb;
u32 retval, duration;
struct sk_buff *skb;
struct sock *sk;
void *data;
int ret;
u32 i;
/* Only flow dissector programs may be run through this handler. */
if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
return -EINVAL;
/* Copy the packet from user space, reserving skb headroom and
 * enough tailroom for the shared info that build_skb() appends.
 */
data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
if (IS_ERR(data))
return PTR_ERR(data);
/* Dummy socket so the skb is associated with the caller's netns. */
sk = kzalloc(sizeof(*sk), GFP_USER);
if (!sk) {
kfree(data);
return -ENOMEM;
}
/* NOTE(review): sock_init_data() reinitializes much of *sk after
 * sock_net_set() — confirm it does not undo the netns assignment.
 */
sock_net_set(sk, current->nsproxy->net_ns);
sock_init_data(NULL, sk);
/* On success the skb takes ownership of @data; kfree_skb() below
 * frees both.
 */
skb = build_skb(data, 0);
if (!skb) {
kfree(data);
kfree(sk);
return -ENOMEM;
}
skb->sk = sk;
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
__skb_put(skb, size);
/* eth_type_trans() consumes the Ethernet header and sets
 * skb->protocol; the network header then starts at skb->data.
 */
skb->protocol = eth_type_trans(skb,
current->nsproxy->net_ns->loopback_dev);
skb_reset_network_header(skb);
/* Hand the output area to the program via the skb control block. */
cb = (struct bpf_skb_data_end *)skb->cb;
cb->qdisc_cb.flow_keys = &flow_keys;
if (!repeat)
repeat = 1;
time_start = ktime_get_ns();
for (i = 0; i < repeat; i++) {
/* Run with preemption off and under RCU, matching how the
 * dissector runs in the real receive path.
 */
preempt_disable();
rcu_read_lock();
retval = __skb_flow_bpf_dissect(prog, skb,
&flow_keys_dissector,
&flow_keys);
rcu_read_unlock();
preempt_enable();
/* Periodically yield the CPU; timing is paused around the
 * reschedule so it does not inflate the measured duration.
 * Bail out early if the caller has a signal pending.
 */
if (need_resched()) {
if (signal_pending(current))
break;
time_spent += ktime_get_ns() - time_start;
cond_resched();
time_start = ktime_get_ns();
}
}
time_spent += ktime_get_ns() - time_start;
/* Report the mean duration per run, saturated to U32_MAX. */
do_div(time_spent, repeat);
duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
/* retval holds the last run's result; repeat >= 1 guarantees it
 * was set at least once.
 */
ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
retval, duration);
kfree_skb(skb);
kfree(sk);
return ret;
}

View File

@ -7711,6 +7711,7 @@ const struct bpf_verifier_ops flow_dissector_verifier_ops = {
};
/* Wiring .test_run lets BPF_PROG_TEST_RUN exercise flow dissector progs. */
const struct bpf_prog_ops flow_dissector_prog_ops = {
.test_run = bpf_prog_test_run_flow_dissector,
};
int sk_detach_filter(struct sock *sk)

View File

@ -683,6 +683,46 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
}
}
/*
 * __skb_flow_bpf_dissect - run a BPF flow dissector program on @skb.
 *
 * Zeroes and fills @flow_keys with the program's output.  The skb's
 * control block is saved before the run and restored afterwards, so
 * @skb is unmodified on return despite the dropped const qualifier.
 *
 * NOTE(review): @flow_dissector is not referenced in this body —
 * presumably kept for signature symmetry with __skb_flow_dissect();
 * confirm.
 *
 * Returns true iff the program returned BPF_OK.
 */
bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
struct bpf_flow_keys *flow_keys)
{
struct bpf_skb_data_end cb_saved;
struct bpf_skb_data_end *cb;
u32 result;
/* Note that even though the const qualifier is discarded
 * throughout the execution of the BPF program, all changes (the
 * control block) are reverted after the BPF program returns.
 * Therefore, __skb_flow_dissect does not alter the skb.
 */
cb = (struct bpf_skb_data_end *)skb->cb;
/* Save Control Block */
memcpy(&cb_saved, cb, sizeof(cb_saved));
memset(cb, 0, sizeof(*cb));
/* Pass parameters to the BPF program */
memset(flow_keys, 0, sizeof(*flow_keys));
cb->qdisc_cb.flow_keys = flow_keys;
/* Start both offsets at the network header; the program advances
 * thoff as it parses.
 */
flow_keys->nhoff = skb_network_offset(skb);
flow_keys->thoff = flow_keys->nhoff;
bpf_compute_data_pointers((struct sk_buff *)skb);
result = BPF_PROG_RUN(prog, skb);
/* Restore state */
memcpy(cb, &cb_saved, sizeof(cb_saved));
/* Sanitize program output: keep nhoff within the packet and thoff
 * within [nhoff, skb->len] regardless of what the program wrote.
 */
flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, 0, skb->len);
flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
flow_keys->nhoff, skb->len);
return result == BPF_OK;
}
/**
* __skb_flow_dissect - extract the flow_keys struct and return it
* @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
@ -714,7 +754,6 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector_key_vlan *key_vlan;
enum flow_dissect_ret fdret;
enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
struct bpf_prog *attached = NULL;
int num_hdrs = 0;
u8 ip_proto = 0;
bool ret;
@ -754,53 +793,30 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
FLOW_DISSECTOR_KEY_BASIC,
target_container);
rcu_read_lock();
if (skb) {
struct bpf_flow_keys flow_keys;
struct bpf_prog *attached = NULL;
rcu_read_lock();
if (skb->dev)
attached = rcu_dereference(dev_net(skb->dev)->flow_dissector_prog);
else if (skb->sk)
attached = rcu_dereference(sock_net(skb->sk)->flow_dissector_prog);
else
WARN_ON_ONCE(1);
}
if (attached) {
/* Note that even though the const qualifier is discarded
* throughout the execution of the BPF program, all changes(the
* control block) are reverted after the BPF program returns.
* Therefore, __skb_flow_dissect does not alter the skb.
*/
struct bpf_flow_keys flow_keys = {};
struct bpf_skb_data_end cb_saved;
struct bpf_skb_data_end *cb;
u32 result;
cb = (struct bpf_skb_data_end *)skb->cb;
/* Save Control Block */
memcpy(&cb_saved, cb, sizeof(cb_saved));
memset(cb, 0, sizeof(cb_saved));
/* Pass parameters to the BPF program */
cb->qdisc_cb.flow_keys = &flow_keys;
flow_keys.nhoff = nhoff;
flow_keys.thoff = nhoff;
bpf_compute_data_pointers((struct sk_buff *)skb);
result = BPF_PROG_RUN(attached, skb);
/* Restore state */
memcpy(cb, &cb_saved, sizeof(cb_saved));
flow_keys.nhoff = clamp_t(u16, flow_keys.nhoff, 0, skb->len);
flow_keys.thoff = clamp_t(u16, flow_keys.thoff,
flow_keys.nhoff, skb->len);
__skb_flow_bpf_to_target(&flow_keys, flow_dissector,
target_container);
if (attached) {
ret = __skb_flow_bpf_dissect(attached, skb,
flow_dissector,
&flow_keys);
__skb_flow_bpf_to_target(&flow_keys, flow_dissector,
target_container);
rcu_read_unlock();
return ret;
}
rcu_read_unlock();
return result == BPF_OK;
}
rcu_read_unlock();
if (dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_ETH_ADDRS)) {

View File

@ -148,6 +148,9 @@ $(OUTPUT)/test_xdp_noinline.o: CLANG_FLAGS += -fno-inline
$(OUTPUT)/test_queue_map.o: test_queue_stack_map.h
$(OUTPUT)/test_stack_map.o: test_queue_stack_map.h
$(OUTPUT)/flow_dissector_load.o: flow_dissector_load.h
$(OUTPUT)/test_progs.o: flow_dissector_load.h
BTF_LLC_PROBE := $(shell $(LLC) -march=bpf -mattr=help 2>&1 | grep dwarfris)
BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF)
BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm')

View File

@ -12,6 +12,7 @@
#include <bpf/libbpf.h>
#include "bpf_rlimit.h"
#include "flow_dissector_load.h"
const char *cfg_pin_path = "/sys/fs/bpf/flow_dissector";
const char *cfg_map_name = "jmp_table";
@ -21,46 +22,13 @@ char *cfg_path_name;
static void load_and_attach_program(void)
{
struct bpf_program *prog, *main_prog;
struct bpf_map *prog_array;
int i, fd, prog_fd, ret;
int prog_fd, ret;
struct bpf_object *obj;
int prog_array_fd;
ret = bpf_prog_load(cfg_path_name, BPF_PROG_TYPE_FLOW_DISSECTOR, &obj,
&prog_fd);
ret = bpf_flow_load(&obj, cfg_path_name, cfg_section_name,
cfg_map_name, &prog_fd);
if (ret)
error(1, 0, "bpf_prog_load %s", cfg_path_name);
main_prog = bpf_object__find_program_by_title(obj, cfg_section_name);
if (!main_prog)
error(1, 0, "bpf_object__find_program_by_title %s",
cfg_section_name);
prog_fd = bpf_program__fd(main_prog);
if (prog_fd < 0)
error(1, 0, "bpf_program__fd");
prog_array = bpf_object__find_map_by_name(obj, cfg_map_name);
if (!prog_array)
error(1, 0, "bpf_object__find_map_by_name %s", cfg_map_name);
prog_array_fd = bpf_map__fd(prog_array);
if (prog_array_fd < 0)
error(1, 0, "bpf_map__fd %s", cfg_map_name);
i = 0;
bpf_object__for_each_program(prog, obj) {
fd = bpf_program__fd(prog);
if (fd < 0)
error(1, 0, "bpf_program__fd");
if (fd != prog_fd) {
printf("%d: %s\n", i, bpf_program__title(prog, false));
bpf_map_update_elem(prog_array_fd, &i, &fd, BPF_ANY);
++i;
}
}
error(1, 0, "bpf_flow_load %s", cfg_path_name);
ret = bpf_prog_attach(prog_fd, 0 /* Ignore */, BPF_FLOW_DISSECTOR, 0);
if (ret)
@ -69,7 +37,6 @@ static void load_and_attach_program(void)
ret = bpf_object__pin(obj, cfg_pin_path);
if (ret)
error(1, 0, "bpf_object__pin %s", cfg_pin_path);
}
static void detach_program(void)

View File

@ -0,0 +1,55 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
#ifndef FLOW_DISSECTOR_LOAD
#define FLOW_DISSECTOR_LOAD
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
/*
 * bpf_flow_load() - load a flow dissector BPF object file and populate
 * its tail-call program array.
 *
 * @obj:          out: the loaded bpf_object (caller owns and closes it)
 * @path:         path to the BPF object file
 * @section_name: section title of the entry (dispatcher) program
 * @map_name:     name of the PROG_ARRAY map used for tail calls
 * @prog_fd:      out: fd of the entry program
 *
 * Every program in the object except the entry program is inserted into
 * the program array at consecutive indices, in object order.
 *
 * Returns 0 on success, a negative value on failure.  Note: on failure
 * after a successful load, *obj is left open for the caller to close.
 */
static inline int bpf_flow_load(struct bpf_object **obj,
				const char *path,
				const char *section_name,
				const char *map_name,
				int *prog_fd)
{
	struct bpf_program *prog, *main_prog;
	struct bpf_map *prog_array;
	int prog_array_fd;
	int ret, fd, i;

	ret = bpf_prog_load(path, BPF_PROG_TYPE_FLOW_DISSECTOR, obj,
			    prog_fd);
	if (ret)
		return ret;

	/* BUGFIX: the failure paths below used to 'return ret', but ret
	 * is 0 here (bpf_prog_load succeeded), so lookup failures were
	 * silently reported as success.  Return -1 instead.
	 */
	main_prog = bpf_object__find_program_by_title(*obj, section_name);
	if (!main_prog)
		return -1;

	*prog_fd = bpf_program__fd(main_prog);
	if (*prog_fd < 0)
		return -1;

	prog_array = bpf_object__find_map_by_name(*obj, map_name);
	if (!prog_array)
		return -1;

	prog_array_fd = bpf_map__fd(prog_array);
	if (prog_array_fd < 0)
		return -1;

	/* Fill the jump table with every non-entry program. */
	i = 0;
	bpf_object__for_each_program(prog, *obj) {
		fd = bpf_program__fd(prog);
		if (fd < 0)
			return fd;

		if (fd != *prog_fd) {
			bpf_map_update_elem(prog_array_fd, &i, &fd, BPF_ANY);
			++i;
		}
	}

	return 0;
}
#endif /* FLOW_DISSECTOR_LOAD */

View File

@ -39,6 +39,7 @@ typedef __u16 __sum16;
#include "bpf_endian.h"
#include "bpf_rlimit.h"
#include "trace_helpers.h"
#include "flow_dissector_load.h"
static int error_cnt, pass_cnt;
static bool jit_enabled;
@ -53,9 +54,10 @@ static struct {
} __packed pkt_v4 = {
.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
.iph.ihl = 5,
.iph.protocol = 6,
.iph.protocol = IPPROTO_TCP,
.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
.tcp.urg_ptr = 123,
.tcp.doff = 5,
};
/* ipv6 test vector */
@ -65,9 +67,10 @@ static struct {
struct tcphdr tcp;
} __packed pkt_v6 = {
.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
.iph.nexthdr = 6,
.iph.nexthdr = IPPROTO_TCP,
.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
.tcp.urg_ptr = 123,
.tcp.doff = 5,
};
#define _CHECK(condition, tag, duration, format...) ({ \
@ -1882,6 +1885,76 @@ out:
bpf_object__close(obj);
}
/* Compare a bpf_flow_keys returned by BPF_PROG_TEST_RUN against the
 * expected fixture via whole-struct memcmp, printing got/expected field
 * pairs on mismatch.  NOTE(review): CHECK() appears to fold in the
 * caller's local 'duration' (see _CHECK()'s parameter list) — callers
 * must declare one; confirm.
 */
#define CHECK_FLOW_KEYS(desc, got, expected) \
CHECK(memcmp(&got, &expected, sizeof(got)) != 0, \
desc, \
"nhoff=%u/%u " \
"thoff=%u/%u " \
"addr_proto=0x%x/0x%x " \
"is_frag=%u/%u " \
"is_first_frag=%u/%u " \
"is_encap=%u/%u " \
"n_proto=0x%x/0x%x " \
"sport=%u/%u " \
"dport=%u/%u\n", \
got.nhoff, expected.nhoff, \
got.thoff, expected.thoff, \
got.addr_proto, expected.addr_proto, \
got.is_frag, expected.is_frag, \
got.is_first_frag, expected.is_first_frag, \
got.is_encap, expected.is_encap, \
got.n_proto, expected.n_proto, \
got.sport, expected.sport, \
got.dport, expected.dport)
/* Expected dissection of pkt_v4: offsets are relative to the network
 * header (nhoff 0), so thoff is just the IPv4 header length.
 */
static struct bpf_flow_keys pkt_v4_flow_keys = {
.nhoff = 0,
.thoff = sizeof(struct iphdr),
.addr_proto = ETH_P_IP,
.ip_proto = IPPROTO_TCP,
.n_proto = bpf_htons(ETH_P_IP),
};
/* Expected dissection of pkt_v6; same layout with an IPv6 header. */
static struct bpf_flow_keys pkt_v6_flow_keys = {
.nhoff = 0,
.thoff = sizeof(struct ipv6hdr),
.addr_proto = ETH_P_IPV6,
.ip_proto = IPPROTO_TCP,
.n_proto = bpf_htons(ETH_P_IPV6),
};
/*
 * End-to-end flow dissector test: load ./bpf_flow.o, run its entry
 * program via BPF_PROG_TEST_RUN on the canned IPv4 and IPv6 TCP
 * packets, and compare the dissected bpf_flow_keys against the
 * pkt_v4_flow_keys/pkt_v6_flow_keys fixtures.
 */
static void test_flow_dissector(void)
{
struct bpf_flow_keys flow_keys;
struct bpf_object *obj;
__u32 duration, retval;
int err, prog_fd;
__u32 size;
/* Load the object and populate its tail-call jump table. */
err = bpf_flow_load(&obj, "./bpf_flow.o", "flow_dissector",
"jmp_table", &prog_fd);
if (err) {
error_cnt++;
return;
}
/* 10 repetitions over the IPv4 packet.  retval is the bool result
 * of __skb_flow_bpf_dissect(), so 1 means the program returned
 * BPF_OK.
 */
err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
&flow_keys, &size, &retval, &duration);
CHECK(size != sizeof(flow_keys) || err || retval != 1, "ipv4",
"err %d errno %d retval %d duration %d size %u/%lu\n",
err, errno, retval, duration, size, sizeof(flow_keys));
CHECK_FLOW_KEYS("ipv4_flow_keys", flow_keys, pkt_v4_flow_keys);
/* Same check for the IPv6 packet. */
err = bpf_prog_test_run(prog_fd, 10, &pkt_v6, sizeof(pkt_v6),
&flow_keys, &size, &retval, &duration);
CHECK(size != sizeof(flow_keys) || err || retval != 1, "ipv6",
"err %d errno %d retval %d duration %d size %u/%lu\n",
err, errno, retval, duration, size, sizeof(flow_keys));
CHECK_FLOW_KEYS("ipv6_flow_keys", flow_keys, pkt_v6_flow_keys);
bpf_object__close(obj);
}
int main(void)
{
srand(time(NULL));
@ -1909,6 +1982,7 @@ int main(void)
test_reference_tracking();
test_queue_stack_map(QUEUE);
test_queue_stack_map(STACK);
test_flow_dissector();
printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;