Merge branch 'bpf-prog-digest'

Daniel Borkmann says:

====================
Minor BPF cleanups and digest

First two patches are minor cleanups, and the third one adds
a prog digest. For details, please see individual patches.
Following this series, I have a patch set adding tracepoint support
that also makes use of this digest facility.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
David S. Miller 2016-12-05 15:33:11 -05:00
commit 5397b31bce
10 changed files with 137 additions and 30 deletions

View File

@ -216,6 +216,7 @@ u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
void bpf_prog_calc_digest(struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);

View File

@ -14,6 +14,7 @@
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/cryptohash.h>
#include <net/sch_generic.h>
@ -56,6 +57,9 @@ struct bpf_prog_aux;
/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK 512
/* Maximum BPF program size in bytes. */
#define MAX_BPF_SIZE (BPF_MAXINSNS * sizeof(struct bpf_insn))
/* Helper macros for filter block array initializers. */
/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
@ -404,8 +408,9 @@ struct bpf_prog {
cb_access:1, /* Is control block accessed? */
dst_needed:1; /* Do we need dst entry? */
kmemcheck_bitfield_end(meta);
u32 len; /* Number of filter blocks */
enum bpf_prog_type type; /* Type of BPF program */
u32 len; /* Number of filter blocks */
u32 digest[SHA_DIGEST_WORDS]; /* Program digest */
struct bpf_prog_aux *aux; /* Auxiliary fields */
struct sock_fprog_kern *orig_prog; /* Original BPF program */
unsigned int (*bpf_func)(const void *ctx,

View File

@ -397,6 +397,7 @@ enum {
TCA_BPF_NAME,
TCA_BPF_FLAGS,
TCA_BPF_FLAGS_GEN,
TCA_BPF_DIGEST,
__TCA_BPF_MAX,
};

View File

@ -27,6 +27,7 @@ enum {
TCA_ACT_BPF_FD,
TCA_ACT_BPF_NAME,
TCA_ACT_BPF_PAD,
TCA_ACT_BPF_DIGEST,
__TCA_ACT_BPF_MAX,
};
#define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1)

View File

@ -136,6 +136,71 @@ void __bpf_prog_free(struct bpf_prog *fp)
vfree(fp);
}
/* Scratch-buffer bound for the digest: the maximum program image plus one
 * 0x80 terminator byte plus the 64-bit bit-length trailer, rounded up to a
 * whole SHA message block.
 */
#define SHA_BPF_RAW_SIZE \
	round_up(MAX_BPF_SIZE + sizeof(__be64) + 1, SHA_MESSAGE_BYTES)

/* Compute a stable digest over fp's instruction image and store it in
 * fp->digest. Called under verifier mutex.
 */
void bpf_prog_calc_digest(struct bpf_prog *fp)
{
	/* Offset of the 64-bit length trailer within a message block. */
	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
	/* Static scratch buffers are safe here only because calls are
	 * serialized by the verifier mutex (see comment above).
	 */
	static u32 ws[SHA_WORKSPACE_WORDS];
	static u8 raw[SHA_BPF_RAW_SIZE];
	struct bpf_insn *dst = (void *)raw;
	u32 i, bsize, psize, blocks;
	bool was_ld_map;
	u8 *todo = raw;
	__be32 *result;
	__be64 *bits;

	sha_init(fp->digest);
	memset(ws, 0, sizeof(ws));

	/* Copy the instructions while blanking out map fds for the digest
	 * calculation: fds are per-process and thus unstable from the user
	 * space side, and would make digests of otherwise identical
	 * programs differ.
	 */
	for (i = 0, was_ld_map = false; i < fp->len; i++) {
		dst[i] = fp->insnsi[i];
		if (!was_ld_map &&
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
			/* First half of a 16-byte BPF_LD_IMM64 carrying a
			 * map fd in its immediate.
			 */
			was_ld_map = true;
			dst[i].imm = 0;
		} else if (was_ld_map &&
			   dst[i].code == 0 &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&
			   dst[i].off == 0) {
			/* Second (pseudo) half of the same BPF_LD_IMM64. */
			was_ld_map = false;
			dst[i].imm = 0;
		} else {
			was_ld_map = false;
		}
	}

	/* Standard SHA padding: zero-fill the tail, append the 0x80
	 * terminator byte right after the message...
	 */
	psize = fp->len * sizeof(struct bpf_insn);
	memset(&raw[psize], 0, sizeof(raw) - psize);
	raw[psize++] = 0x80;

	bsize = round_up(psize, SHA_MESSAGE_BYTES);
	blocks = bsize / SHA_MESSAGE_BYTES;
	/* ...and place the big-endian message length (in bits) in the last
	 * 8 bytes of the final block, spilling into one extra block when
	 * the terminator left no room for it.
	 */
	if (bsize - psize >= sizeof(__be64)) {
		bits = (__be64 *)(todo + bsize - sizeof(__be64));
	} else {
		bits = (__be64 *)(todo + bsize + bits_offset);
		blocks++;
	}
	/* psize - 1 is the message length in bytes (psize was bumped past
	 * the 0x80 byte); << 3 converts bytes to bits.
	 */
	*bits = cpu_to_be64((psize - 1) << 3);

	while (blocks--) {
		sha_transform(fp->digest, todo, ws);
		todo += SHA_MESSAGE_BYTES;
	}

	/* Convert the digest words to big-endian so the raw bytes dump as
	 * a stable hex string regardless of host endianness.
	 */
	result = (__force __be32 *)fp->digest;
	for (i = 0; i < SHA_DIGEST_WORDS; i++)
		result[i] = cpu_to_be32(fp->digest[i]);
}
static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
{
return BPF_CLASS(insn->code) == BPF_JMP &&

View File

@ -662,8 +662,30 @@ static int bpf_prog_release(struct inode *inode, struct file *filp)
return 0;
}
#ifdef CONFIG_PROC_FS
/* Populate /proc/<pid>/fdinfo/<fd> for a BPF prog fd: program type, JIT
 * state, digest as hex, and locked memory footprint.
 */
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char digest_hex[sizeof(prog->digest) * 2 + 1] = { };
	u64 memlock = prog->pages * 1ULL << PAGE_SHIFT;

	bin2hex(digest_hex, prog->digest, sizeof(prog->digest));

	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_digest:\t%s\n"
		   "memlock:\t%llu\n",
		   prog->type,
		   prog->jited,
		   digest_hex,
		   memlock);
}
#endif
/* File operations for BPF prog fds. Fix: the rendered diff left two
 * .release designated initializers interleaved (old and new diff lines);
 * a duplicate initializer for the same field triggers -Woverride-init and
 * the first one is silently discarded. Keep a single .release entry.
 */
static const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
};
int bpf_prog_new_fd(struct bpf_prog *prog)

View File

@ -3176,6 +3176,8 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
log_level = 0;
}
bpf_prog_calc_digest(env->prog);
ret = replace_map_fd_with_map_ptr(env);
if (ret < 0)
goto skip_full_check;

View File

@ -2748,7 +2748,7 @@ lwt_xmit_func_proto(enum bpf_func_id func_id)
}
}
static bool __is_valid_access(int off, int size, enum bpf_access_type type)
static bool __is_valid_access(int off, int size)
{
if (off < 0 || off >= sizeof(struct __sk_buff))
return false;
@ -2782,7 +2782,7 @@ static bool sk_filter_is_valid_access(int off, int size,
}
}
return __is_valid_access(off, size, type);
return __is_valid_access(off, size);
}
static bool lwt_is_valid_access(int off, int size,
@ -2815,7 +2815,7 @@ static bool lwt_is_valid_access(int off, int size,
break;
}
return __is_valid_access(off, size, type);
return __is_valid_access(off, size);
}
static bool sock_filter_is_valid_access(int off, int size,
@ -2833,11 +2833,9 @@ static bool sock_filter_is_valid_access(int off, int size,
if (off < 0 || off + size > sizeof(struct bpf_sock))
return false;
/* The verifier guarantees that size > 0. */
if (off % size != 0)
return false;
if (size != sizeof(__u32))
return false;
@ -2910,11 +2908,10 @@ static bool tc_cls_act_is_valid_access(int off, int size,
break;
}
return __is_valid_access(off, size, type);
return __is_valid_access(off, size);
}
static bool __is_valid_xdp_access(int off, int size,
enum bpf_access_type type)
static bool __is_valid_xdp_access(int off, int size)
{
if (off < 0 || off >= sizeof(struct xdp_md))
return false;
@ -2942,7 +2939,7 @@ static bool xdp_is_valid_access(int off, int size,
break;
}
return __is_valid_xdp_access(off, size, type);
return __is_valid_xdp_access(off, size);
}
void bpf_warn_invalid_xdp_action(u32 act)

View File

@ -117,10 +117,19 @@ static int tcf_bpf_dump_bpf_info(const struct tcf_bpf *prog,
/* Dump eBPF-specific attributes of @prog (optional name, program digest)
 * into the netlink message @skb. Returns 0 or -EMSGSIZE when the skb has
 * no room left.
 */
static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf *prog,
				  struct sk_buff *skb)
{
	struct nlattr *attr;

	if (prog->bpf_name) {
		if (nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name))
			return -EMSGSIZE;
	}

	/* Reserve space for the digest attribute, then copy it in place. */
	attr = nla_reserve(skb, TCA_ACT_BPF_DIGEST,
			   sizeof(prog->filter->digest));
	if (!attr)
		return -EMSGSIZE;
	memcpy(nla_data(attr), prog->filter->digest, nla_len(attr));

	return 0;
}

View File

@ -241,7 +241,7 @@ static int cls_bpf_init(struct tcf_proto *tp)
return 0;
}
static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
tcf_exts_destroy(&prog->exts);
@ -255,22 +255,22 @@ static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
kfree(prog);
}
static void __cls_bpf_delete_prog(struct rcu_head *rcu)
static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
{
struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);
__cls_bpf_delete_prog(container_of(rcu, struct cls_bpf_prog, rcu));
}
cls_bpf_delete_prog(prog->tp, prog);
/* Unlink @prog from @tp and schedule its destruction. The order matters:
 * tear down any hardware offload first, unlink from the RCU-protected
 * list, drop the filter binding, and only then defer the actual free via
 * call_rcu() so concurrent readers can finish traversing the list.
 */
static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	cls_bpf_stop_offload(tp, prog);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu);
}
/* tcf_proto_ops ->delete hook. Fix: the rendered diff interleaved the
 * removed old body (stop_offload / list_del_rcu / unbind / call_rcu) with
 * the new one-line call to __cls_bpf_delete(); executing both would unlink
 * and RCU-free the program twice. Keep only the post-patch delegation.
 */
static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
	__cls_bpf_delete(tp, (struct cls_bpf_prog *) arg);
	return 0;
}
@ -282,12 +282,8 @@ static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
if (!force && !list_empty(&head->plist))
return false;
list_for_each_entry_safe(prog, tmp, &head->plist, link) {
cls_bpf_stop_offload(tp, prog);
list_del_rcu(&prog->link);
tcf_unbind_filter(tp, &prog->res);
call_rcu(&prog->rcu, __cls_bpf_delete_prog);
}
list_for_each_entry_safe(prog, tmp, &head->plist, link)
__cls_bpf_delete(tp, prog);
kfree_rcu(head, rcu);
return true;
@ -511,14 +507,14 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
ret = cls_bpf_offload(tp, prog, oldprog);
if (ret) {
cls_bpf_delete_prog(tp, prog);
__cls_bpf_delete_prog(prog);
return ret;
}
if (oldprog) {
list_replace_rcu(&oldprog->link, &prog->link);
tcf_unbind_filter(tp, &oldprog->res);
call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
call_rcu(&oldprog->rcu, cls_bpf_delete_prog_rcu);
} else {
list_add_rcu(&prog->link, &head->plist);
}
@ -553,10 +549,18 @@ static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
/* Dump eBPF-specific attributes of @prog (optional name, program digest)
 * into the netlink message @skb. Returns 0 or -EMSGSIZE when the skb has
 * no room left.
 */
static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *attr;

	if (prog->bpf_name) {
		if (nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
			return -EMSGSIZE;
	}

	/* Reserve space for the digest attribute, then copy it in place. */
	attr = nla_reserve(skb, TCA_BPF_DIGEST, sizeof(prog->filter->digest));
	if (!attr)
		return -EMSGSIZE;
	memcpy(nla_data(attr), prog->filter->digest, nla_len(attr));

	return 0;
}