selftests/bpf: Add negative tests for new nf_conntrack kfuncs
Test cases we care about and ensure improper usage is caught and rejected by the verifier. Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com> Link: https://lore.kernel.org/r/20220721134245.2450-13-memxor@gmail.com Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
parent
6eb7fba007
commit
c6f420ac9d
@@ -2,13 +2,29 @@
|
||||
#include <test_progs.h>
|
||||
#include <network_helpers.h>
|
||||
#include "test_bpf_nf.skel.h"
|
||||
#include "test_bpf_nf_fail.skel.h"
|
||||
|
||||
/* Large buffer so a full verifier log from a failed load fits. */
static char log_buf[1024 * 1024];

/* Table of programs in test_bpf_nf_fail.bpf.o that the verifier must
 * reject, paired with the substring expected in the verifier log.
 */
struct {
	const char *prog_name;
	const char *err_msg;
} test_bpf_nf_fail_tests[] = {
	{ "alloc_release", "kernel function bpf_ct_release args#0 expected pointer to STRUCT nf_conn but" },
	{ "insert_insert", "kernel function bpf_ct_insert_entry args#0 expected pointer to STRUCT nf_conn___init but" },
	{ "lookup_insert", "kernel function bpf_ct_insert_entry args#0 expected pointer to STRUCT nf_conn___init but" },
	{ "set_timeout_after_insert", "kernel function bpf_ct_set_timeout args#0 expected pointer to STRUCT nf_conn___init but" },
	{ "set_status_after_insert", "kernel function bpf_ct_set_status args#0 expected pointer to STRUCT nf_conn___init but" },
	{ "change_timeout_after_alloc", "kernel function bpf_ct_change_timeout args#0 expected pointer to STRUCT nf_conn but" },
	{ "change_status_after_alloc", "kernel function bpf_ct_change_status args#0 expected pointer to STRUCT nf_conn but" },
};

/* Attach mode selector passed to test_bpf_nf_ct(). */
enum {
	TEST_XDP,
	TEST_TC_BPF,
};
|
||||
|
||||
void test_bpf_nf_ct(int mode)
|
||||
static void test_bpf_nf_ct(int mode)
|
||||
{
|
||||
struct test_bpf_nf *skel;
|
||||
int prog_fd, err;
|
||||
@@ -51,10 +67,48 @@ end:
|
||||
test_bpf_nf__destroy(skel);
|
||||
}
|
||||
|
||||
static void test_bpf_nf_ct_fail(const char *prog_name, const char *err_msg)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf,
|
||||
.kernel_log_size = sizeof(log_buf),
|
||||
.kernel_log_level = 1);
|
||||
struct test_bpf_nf_fail *skel;
|
||||
struct bpf_program *prog;
|
||||
int ret;
|
||||
|
||||
skel = test_bpf_nf_fail__open_opts(&opts);
|
||||
if (!ASSERT_OK_PTR(skel, "test_bpf_nf_fail__open"))
|
||||
return;
|
||||
|
||||
prog = bpf_object__find_program_by_name(skel->obj, prog_name);
|
||||
if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
|
||||
goto end;
|
||||
|
||||
bpf_program__set_autoload(prog, true);
|
||||
|
||||
ret = test_bpf_nf_fail__load(skel);
|
||||
if (!ASSERT_ERR(ret, "test_bpf_nf_fail__load must fail"))
|
||||
goto end;
|
||||
|
||||
if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) {
|
||||
fprintf(stderr, "Expected: %s\n", err_msg);
|
||||
fprintf(stderr, "Verifier: %s\n", log_buf);
|
||||
}
|
||||
|
||||
end:
|
||||
test_bpf_nf_fail__destroy(skel);
|
||||
}
|
||||
|
||||
void test_bpf_nf(void)
|
||||
{
|
||||
int i;
|
||||
if (test__start_subtest("xdp-ct"))
|
||||
test_bpf_nf_ct(TEST_XDP);
|
||||
if (test__start_subtest("tc-bpf-ct"))
|
||||
test_bpf_nf_ct(TEST_TC_BPF);
|
||||
for (i = 0; i < ARRAY_SIZE(test_bpf_nf_fail_tests); i++) {
|
||||
if (test__start_subtest(test_bpf_nf_fail_tests[i].prog_name))
|
||||
test_bpf_nf_ct_fail(test_bpf_nf_fail_tests[i].prog_name,
|
||||
test_bpf_nf_fail_tests[i].err_msg);
|
||||
}
|
||||
}
|
||||
|
134
tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c
Normal file
134
tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c
Normal file
@@ -0,0 +1,134 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <vmlinux.h>
|
||||
#include <bpf/bpf_tracing.h>
|
||||
#include <bpf/bpf_helpers.h>
|
||||
#include <bpf/bpf_core_read.h>
|
||||
|
||||
/* Opaque forward declaration; the real definition comes from kernel BTF. */
struct nf_conn;

/* Minimal local mirror of the kernel's struct bpf_ct_opts; field accesses
 * are relocated against the running kernel via CO-RE
 * (preserve_access_index). */
struct bpf_ct_opts___local {
	s32 netns_id;
	s32 error;
	u8 l4proto;
	u8 reserved[3];
} __attribute__((preserve_access_index));

/* Conntrack kfunc prototypes, resolved against kernel BTF at load time. */
struct nf_conn *bpf_skb_ct_alloc(struct __sk_buff *, struct bpf_sock_tuple *, u32,
				 struct bpf_ct_opts___local *, u32) __ksym;
struct nf_conn *bpf_skb_ct_lookup(struct __sk_buff *, struct bpf_sock_tuple *, u32,
				  struct bpf_ct_opts___local *, u32) __ksym;
struct nf_conn *bpf_ct_insert_entry(struct nf_conn *) __ksym;
void bpf_ct_release(struct nf_conn *) __ksym;
void bpf_ct_set_timeout(struct nf_conn *, u32) __ksym;
int bpf_ct_change_timeout(struct nf_conn *, u32) __ksym;
int bpf_ct_set_status(struct nf_conn *, u32) __ksym;
int bpf_ct_change_status(struct nf_conn *, u32) __ksym;
|
||||
|
||||
SEC("?tc")
/* Negative test: bpf_ct_release() must reject an allocated-but-not-inserted
 * entry (type nf_conn___init), as it only accepts a looked-up nf_conn. */
int alloc_release(struct __sk_buff *ctx)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_ct_opts___local ct_opts = {};
	struct nf_conn *conn;

	conn = bpf_skb_ct_alloc(ctx, &tuple, sizeof(tuple.ipv4), &ct_opts,
				sizeof(ct_opts));
	if (!conn)
		return 0;
	bpf_ct_release(conn);
	return 0;
}
|
||||
|
||||
SEC("?tc")
/* Negative test: a second bpf_ct_insert_entry() must be rejected because
 * the first insert already converted the entry from nf_conn___init to
 * nf_conn. */
int insert_insert(struct __sk_buff *ctx)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_ct_opts___local ct_opts = {};
	struct nf_conn *conn;

	conn = bpf_skb_ct_alloc(ctx, &tuple, sizeof(tuple.ipv4), &ct_opts,
				sizeof(ct_opts));
	if (!conn)
		return 0;
	conn = bpf_ct_insert_entry(conn);
	if (!conn)
		return 0;
	conn = bpf_ct_insert_entry(conn);
	return 0;
}
|
||||
|
||||
SEC("?tc")
/* Negative test: bpf_ct_insert_entry() must reject a looked-up nf_conn,
 * since it only accepts freshly allocated nf_conn___init entries. */
int lookup_insert(struct __sk_buff *ctx)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_ct_opts___local ct_opts = {};
	struct nf_conn *conn;

	conn = bpf_skb_ct_lookup(ctx, &tuple, sizeof(tuple.ipv4), &ct_opts,
				 sizeof(ct_opts));
	if (!conn)
		return 0;
	bpf_ct_insert_entry(conn);
	return 0;
}
|
||||
|
||||
SEC("?tc")
/* Negative test: bpf_ct_set_timeout() is only valid on nf_conn___init,
 * i.e. before the entry has been inserted; after insertion the entry is
 * an nf_conn and must be rejected. */
int set_timeout_after_insert(struct __sk_buff *ctx)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_ct_opts___local ct_opts = {};
	struct nf_conn *conn;

	conn = bpf_skb_ct_alloc(ctx, &tuple, sizeof(tuple.ipv4), &ct_opts,
				sizeof(ct_opts));
	if (!conn)
		return 0;
	conn = bpf_ct_insert_entry(conn);
	if (!conn)
		return 0;
	bpf_ct_set_timeout(conn, 0);
	return 0;
}
|
||||
|
||||
SEC("?tc")
/* Negative test: bpf_ct_set_status() is only valid on nf_conn___init;
 * calling it on an already-inserted nf_conn must be rejected. */
int set_status_after_insert(struct __sk_buff *ctx)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_ct_opts___local ct_opts = {};
	struct nf_conn *conn;

	conn = bpf_skb_ct_alloc(ctx, &tuple, sizeof(tuple.ipv4), &ct_opts,
				sizeof(ct_opts));
	if (!conn)
		return 0;
	conn = bpf_ct_insert_entry(conn);
	if (!conn)
		return 0;
	bpf_ct_set_status(conn, 0);
	return 0;
}
|
||||
|
||||
SEC("?tc")
/* Negative test: bpf_ct_change_timeout() expects an inserted/looked-up
 * nf_conn; an allocated-only nf_conn___init must be rejected. */
int change_timeout_after_alloc(struct __sk_buff *ctx)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_ct_opts___local ct_opts = {};
	struct nf_conn *conn;

	conn = bpf_skb_ct_alloc(ctx, &tuple, sizeof(tuple.ipv4), &ct_opts,
				sizeof(ct_opts));
	if (!conn)
		return 0;
	bpf_ct_change_timeout(conn, 0);
	return 0;
}
|
||||
|
||||
SEC("?tc")
/* Negative test: bpf_ct_change_status() expects an inserted/looked-up
 * nf_conn; an allocated-only nf_conn___init must be rejected. */
int change_status_after_alloc(struct __sk_buff *ctx)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_ct_opts___local ct_opts = {};
	struct nf_conn *conn;

	conn = bpf_skb_ct_alloc(ctx, &tuple, sizeof(tuple.ipv4), &ct_opts,
				sizeof(ct_opts));
	if (!conn)
		return 0;
	bpf_ct_change_status(conn, 0);
	return 0;
}
|
||||
|
||||
/* GPL-compatible license is required to call kernel kfuncs. */
char _license[] SEC("license") = "GPL";
|
Loading…
Reference in New Issue
Block a user