commit 7105e828c0
Currently a dump of an xlated prog (post verifier stage) doesn't
correlate used helpers as well as maps. The prog info lists involved
map ids, however there's no correlation of where in the program they
are used as of today. Likewise, bpftool does not correlate helper
calls with the target functions.

The latter can be done w/o any kernel changes through kallsyms, and
also has the advantage that this works with inlined helpers and BPF
calls.

Example, via interpreter:

  # tc filter show dev foo ingress
  filter protocol all pref 49152 bpf chain 0
  filter protocol all pref 49152 bpf chain 0 handle 0x1 foo.o:[ingress] \
                      direct-action not_in_hw id 1 tag c74773051b364165   <-- prog id:1

  * Output before patch (calls/maps remain unclear):

  # bpftool prog dump xlated id 1        <-- dump prog id:1
   0: (b7) r1 = 2
   1: (63) *(u32 *)(r10 -4) = r1
   2: (bf) r2 = r10
   3: (07) r2 += -4
   4: (18) r1 = 0xffff95c47a8d4800
   6: (85) call unknown#73040
   7: (15) if r0 == 0x0 goto pc+18
   8: (bf) r2 = r10
   9: (07) r2 += -4
  10: (bf) r1 = r0
  11: (85) call unknown#73040
  12: (15) if r0 == 0x0 goto pc+23
  [...]

  * Output after patch:

  # bpftool prog dump xlated id 1
   0: (b7) r1 = 2
   1: (63) *(u32 *)(r10 -4) = r1
   2: (bf) r2 = r10
   3: (07) r2 += -4
   4: (18) r1 = map[id:2]                  <-- map id:2
   6: (85) call bpf_map_lookup_elem#73424  <-- helper call
   7: (15) if r0 == 0x0 goto pc+18
   8: (bf) r2 = r10
   9: (07) r2 += -4
  10: (bf) r1 = r0
  11: (85) call bpf_map_lookup_elem#73424
  12: (15) if r0 == 0x0 goto pc+23
  [...]

  # bpftool map show id 2                  <-- show/dump/etc map id:2
  2: hash_of_maps  flags 0x0
      key 4B  value 4B  max_entries 3  memlock 4096B

Example, JITed, same prog:

  # tc filter show dev foo ingress
  filter protocol all pref 49152 bpf chain 0
  filter protocol all pref 49152 bpf chain 0 handle 0x1 foo.o:[ingress] \
                      direct-action not_in_hw id 3 tag c74773051b364165 jited

  # bpftool prog show id 3
  3: sched_cls  tag c74773051b364165
      loaded_at Dec 19/13:48  uid 0
      xlated 384B  jited 257B  memlock 4096B  map_ids 2

  # bpftool prog dump xlated id 3
   0: (b7) r1 = 2
   1: (63) *(u32 *)(r10 -4) = r1
   2: (bf) r2 = r10
   3: (07) r2 += -4
   4: (18) r1 = map[id:2]                      <-- map id:2
   6: (85) call __htab_map_lookup_elem#77408   <-+ inlined rewrite
   7: (15) if r0 == 0x0 goto pc+2                |
   8: (07) r0 += 56                              |
   9: (79) r0 = *(u64 *)(r0 +0)                <-+
  10: (15) if r0 == 0x0 goto pc+24
  11: (bf) r2 = r10
  12: (07) r2 += -4
  [...]

Example, same prog, but kallsyms disabled (in that case we are also
not allowed to pass any relative offsets, etc, so prog becomes
pointer sanitized on dump):

  # sysctl kernel.kptr_restrict=2
  kernel.kptr_restrict = 2

  # bpftool prog dump xlated id 3
   0: (b7) r1 = 2
   1: (63) *(u32 *)(r10 -4) = r1
   2: (bf) r2 = r10
   3: (07) r2 += -4
   4: (18) r1 = map[id:2]
   6: (85) call bpf_unspec#0
   7: (15) if r0 == 0x0 goto pc+2
  [...]

Example, BPF calls via interpreter:

  # bpftool prog dump xlated id 1
   0: (85) call pc+2#__bpf_prog_run_args32
   1: (b7) r0 = 1
   2: (95) exit
   3: (b7) r0 = 2
   4: (95) exit

Example, BPF calls via JIT:

  # sysctl net.core.bpf_jit_enable=1
  net.core.bpf_jit_enable = 1
  # sysctl net.core.bpf_jit_kallsyms=1
  net.core.bpf_jit_kallsyms = 1

  # bpftool prog dump xlated id 1
   0: (85) call pc+2#bpf_prog_3b185187f1855c4c_F
   1: (b7) r0 = 1
   2: (95) exit
   3: (b7) r0 = 2
   4: (95) exit

And finally, an example for tail calls that is now working as well
wrt correlation:

  # bpftool prog dump xlated id 2
  [...]
  10: (b7) r2 = 8
  11: (85) call bpf_trace_printk#-41312
  12: (bf) r1 = r6
  13: (18) r2 = map[id:1]
  15: (b7) r3 = 0
  16: (85) call bpf_tail_call#12
  17: (b7) r1 = 42
  18: (6b) *(u16 *)(r6 +46) = r1
  19: (b7) r0 = 0
  20: (95) exit

  # bpftool map show id 1
  1: prog_array  flags 0x0
      key 4B  value 4B  max_entries 1  memlock 4096B

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
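The kallsyms-based correlation described above can be sketched in plain user space C.
The snippet below is only an illustration under a couple of assumptions, not bpftool's
actual implementation: it assumes the "#<num>" values printed after call insns
(e.g. #73424 and #-41312 above) are offsets relative to the kernel's __bpf_call_base
symbol, and that /proc/kallsyms exposes addresses (i.e. kptr_restrict permits it);
ksym_addr(), ksym_name() and resolve_call() are hypothetical helper names used only here.

  #include <stdio.h>
  #include <string.h>

  /* Look up a symbol address in /proc/kallsyms by name; 0 if not found
   * (or if addresses are hidden by kptr_restrict).
   */
  static unsigned long long ksym_addr(const char *name)
  {
  	unsigned long long addr;
  	char line[512], sym[256], type;
  	FILE *f = fopen("/proc/kallsyms", "r");

  	if (!f)
  		return 0;
  	while (fgets(line, sizeof(line), f)) {
  		if (sscanf(line, "%llx %c %255s", &addr, &type, sym) != 3)
  			continue;
  		if (!strcmp(sym, name)) {
  			fclose(f);
  			return addr;
  		}
  	}
  	fclose(f);
  	return 0;
  }

  /* Reverse lookup: name of the symbol starting exactly at @addr. */
  static int ksym_name(unsigned long long addr, char *buf, size_t len)
  {
  	unsigned long long cur;
  	char line[512], sym[256], type;
  	FILE *f = fopen("/proc/kallsyms", "r");

  	if (!f)
  		return -1;
  	while (fgets(line, sizeof(line), f)) {
  		if (sscanf(line, "%llx %c %255s", &cur, &type, sym) != 3)
  			continue;
  		if (cur == addr) {
  			fclose(f);
  			snprintf(buf, len, "%s", sym);
  			return 0;
  		}
  	}
  	fclose(f);
  	return -1;
  }

  /* Correlate a "call unknown#73040"-style immediate: treat the number
   * after '#' as an offset from __bpf_call_base and resolve the target.
   */
  static void resolve_call(int imm, char *buf, size_t len)
  {
  	unsigned long long base = ksym_addr("__bpf_call_base");

  	if (!base || ksym_name(base + imm, buf, len))
  		snprintf(buf, len, "unknown#%d", imm);
  }

With kptr_restrict set to 2 as in the example above, the lookups fail and the
sketch degrades to the same "unknown#<imm>" output that the pre-patch dump shows.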
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/bpf.h>

#include "disasm.h"

/* Table of helper names indexed by BPF_FUNC_* id, e.g.
 * func_id_str[BPF_FUNC_map_lookup_elem] == "bpf_map_lookup_elem".
 */
#define __BPF_FUNC_STR_FN(x) [BPF_FUNC_ ## x] = __stringify(bpf_ ## x)
static const char * const func_id_str[] = {
	__BPF_FUNC_MAPPER(__BPF_FUNC_STR_FN)
};
#undef __BPF_FUNC_STR_FN

/* Resolve the target of a call insn: known helper ids map directly to
 * their name, otherwise the cb_call callback may resolve it (e.g. via
 * kallsyms), and BPF-to-BPF pseudo calls fall back to the signed
 * pc-relative offset.
 */
static const char *__func_get_name(const struct bpf_insn_cbs *cbs,
				   const struct bpf_insn *insn,
				   char *buff, size_t len)
{
	BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID);

	if (insn->src_reg != BPF_PSEUDO_CALL &&
	    insn->imm >= 0 && insn->imm < __BPF_FUNC_MAX_ID &&
	    func_id_str[insn->imm])
		return func_id_str[insn->imm];

	if (cbs && cbs->cb_call)
		return cbs->cb_call(cbs->private_data, insn);

	if (insn->src_reg == BPF_PSEUDO_CALL)
		snprintf(buff, len, "%+d", insn->imm);

	return buff;
}

/* Print a 64-bit (ldimm64) immediate; the cb_imm callback may rewrite
 * it into something more useful than the raw value, e.g. "map[id:2]".
 */
static const char *__func_imm_name(const struct bpf_insn_cbs *cbs,
				   const struct bpf_insn *insn,
				   u64 full_imm, char *buff, size_t len)
{
	if (cbs && cbs->cb_imm)
		return cbs->cb_imm(cbs->private_data, insn, full_imm);

	snprintf(buff, len, "0x%llx", (unsigned long long)full_imm);
	return buff;
}

/* Helper id to name, or "unknown" for out-of-range/unmapped ids. */
const char *func_id_name(int id)
{
	if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id])
		return func_id_str[id];
	else
		return "unknown";
}

const char *const bpf_class_string[8] = {
	[BPF_LD] = "ld",
	[BPF_LDX] = "ldx",
	[BPF_ST] = "st",
	[BPF_STX] = "stx",
	[BPF_ALU] = "alu",
	[BPF_JMP] = "jmp",
	[BPF_RET] = "BUG",
	[BPF_ALU64] = "alu64",
};

const char *const bpf_alu_string[16] = {
	[BPF_ADD >> 4] = "+=",
	[BPF_SUB >> 4] = "-=",
	[BPF_MUL >> 4] = "*=",
	[BPF_DIV >> 4] = "/=",
	[BPF_OR >> 4] = "|=",
	[BPF_AND >> 4] = "&=",
	[BPF_LSH >> 4] = "<<=",
	[BPF_RSH >> 4] = ">>=",
	[BPF_NEG >> 4] = "neg",
	[BPF_MOD >> 4] = "%=",
	[BPF_XOR >> 4] = "^=",
	[BPF_MOV >> 4] = "=",
	[BPF_ARSH >> 4] = "s>>=",
	[BPF_END >> 4] = "endian",
};

static const char *const bpf_ldst_string[] = {
	[BPF_W >> 3] = "u32",
	[BPF_H >> 3] = "u16",
	[BPF_B >> 3] = "u8",
	[BPF_DW >> 3] = "u64",
};

static const char *const bpf_jmp_string[16] = {
	[BPF_JA >> 4] = "jmp",
	[BPF_JEQ >> 4] = "==",
	[BPF_JGT >> 4] = ">",
	[BPF_JLT >> 4] = "<",
	[BPF_JGE >> 4] = ">=",
	[BPF_JLE >> 4] = "<=",
	[BPF_JSET >> 4] = "&",
	[BPF_JNE >> 4] = "!=",
	[BPF_JSGT >> 4] = "s>",
	[BPF_JSLT >> 4] = "s<",
	[BPF_JSGE >> 4] = "s>=",
	[BPF_JSLE >> 4] = "s<=",
	[BPF_CALL >> 4] = "call",
	[BPF_EXIT >> 4] = "exit",
};

static void print_bpf_end_insn(bpf_insn_print_t verbose,
			       struct bpf_verifier_env *env,
			       const struct bpf_insn *insn)
{
	verbose(env, "(%02x) r%d = %s%d r%d\n", insn->code, insn->dst_reg,
		BPF_SRC(insn->code) == BPF_TO_BE ? "be" : "le",
		insn->imm, insn->dst_reg);
}

/* Pretty-print one (xlated) instruction through cbs->cb_print. */
void print_bpf_insn(const struct bpf_insn_cbs *cbs,
		    struct bpf_verifier_env *env,
		    const struct bpf_insn *insn,
		    bool allow_ptr_leaks)
{
	const bpf_insn_print_t verbose = cbs->cb_print;
	u8 class = BPF_CLASS(insn->code);

	if (class == BPF_ALU || class == BPF_ALU64) {
		if (BPF_OP(insn->code) == BPF_END) {
			if (class == BPF_ALU64)
				verbose(env, "BUG_alu64_%02x\n", insn->code);
			else
				print_bpf_end_insn(verbose, env, insn);
		} else if (BPF_OP(insn->code) == BPF_NEG) {
			verbose(env, "(%02x) r%d = %s-r%d\n",
				insn->code, insn->dst_reg,
				class == BPF_ALU ? "(u32) " : "",
				insn->dst_reg);
		} else if (BPF_SRC(insn->code) == BPF_X) {
			verbose(env, "(%02x) %sr%d %s %sr%d\n",
				insn->code, class == BPF_ALU ? "(u32) " : "",
				insn->dst_reg,
				bpf_alu_string[BPF_OP(insn->code) >> 4],
				class == BPF_ALU ? "(u32) " : "",
				insn->src_reg);
		} else {
			verbose(env, "(%02x) %sr%d %s %s%d\n",
				insn->code, class == BPF_ALU ? "(u32) " : "",
				insn->dst_reg,
				bpf_alu_string[BPF_OP(insn->code) >> 4],
				class == BPF_ALU ? "(u32) " : "",
				insn->imm);
		}
	} else if (class == BPF_STX) {
		if (BPF_MODE(insn->code) == BPF_MEM)
			verbose(env, "(%02x) *(%s *)(r%d %+d) = r%d\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->dst_reg,
				insn->off, insn->src_reg);
		else if (BPF_MODE(insn->code) == BPF_XADD)
			verbose(env, "(%02x) lock *(%s *)(r%d %+d) += r%d\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->dst_reg, insn->off,
				insn->src_reg);
		else
			verbose(env, "BUG_%02x\n", insn->code);
	} else if (class == BPF_ST) {
		if (BPF_MODE(insn->code) != BPF_MEM) {
			verbose(env, "BUG_st_%02x\n", insn->code);
			return;
		}
		verbose(env, "(%02x) *(%s *)(r%d %+d) = %d\n",
			insn->code,
			bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
			insn->dst_reg,
			insn->off, insn->imm);
	} else if (class == BPF_LDX) {
		if (BPF_MODE(insn->code) != BPF_MEM) {
			verbose(env, "BUG_ldx_%02x\n", insn->code);
			return;
		}
		verbose(env, "(%02x) r%d = *(%s *)(r%d %+d)\n",
			insn->code, insn->dst_reg,
			bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
			insn->src_reg, insn->off);
	} else if (class == BPF_LD) {
		if (BPF_MODE(insn->code) == BPF_ABS) {
			verbose(env, "(%02x) r0 = *(%s *)skb[%d]\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->imm);
		} else if (BPF_MODE(insn->code) == BPF_IND) {
			verbose(env, "(%02x) r0 = *(%s *)skb[r%d + %d]\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->src_reg, insn->imm);
		} else if (BPF_MODE(insn->code) == BPF_IMM &&
			   BPF_SIZE(insn->code) == BPF_DW) {
			/* At this point, we already made sure that the second
			 * part of the ldimm64 insn is accessible.
			 */
			u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
			bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD;
			char tmp[64];

			if (map_ptr && !allow_ptr_leaks)
				imm = 0;

			verbose(env, "(%02x) r%d = %s\n",
				insn->code, insn->dst_reg,
				__func_imm_name(cbs, insn, imm,
						tmp, sizeof(tmp)));
		} else {
			verbose(env, "BUG_ld_%02x\n", insn->code);
			return;
		}
	} else if (class == BPF_JMP) {
		u8 opcode = BPF_OP(insn->code);

		if (opcode == BPF_CALL) {
			char tmp[64];

			if (insn->src_reg == BPF_PSEUDO_CALL) {
				verbose(env, "(%02x) call pc%s\n",
					insn->code,
					__func_get_name(cbs, insn,
							tmp, sizeof(tmp)));
			} else {
				strcpy(tmp, "unknown");
				verbose(env, "(%02x) call %s#%d\n", insn->code,
					__func_get_name(cbs, insn,
							tmp, sizeof(tmp)),
					insn->imm);
			}
		} else if (insn->code == (BPF_JMP | BPF_JA)) {
			verbose(env, "(%02x) goto pc%+d\n",
				insn->code, insn->off);
		} else if (insn->code == (BPF_JMP | BPF_EXIT)) {
			verbose(env, "(%02x) exit\n", insn->code);
		} else if (BPF_SRC(insn->code) == BPF_X) {
			verbose(env, "(%02x) if r%d %s r%d goto pc%+d\n",
				insn->code, insn->dst_reg,
				bpf_jmp_string[BPF_OP(insn->code) >> 4],
				insn->src_reg, insn->off);
		} else {
			verbose(env, "(%02x) if r%d %s 0x%x goto pc%+d\n",
				insn->code, insn->dst_reg,
				bpf_jmp_string[BPF_OP(insn->code) >> 4],
				insn->imm, insn->off);
		}
	} else {
		verbose(env, "(%02x) %s\n",
			insn->code, bpf_class_string[class]);
	}
}
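For completeness, a rough sketch of how a consumer, in-kernel or a user-space tool
building this file, might drive print_bpf_insn() above to get the correlated output
shown in the commit message. The callback signatures are inferred from how cb_print,
cb_call and cb_imm are invoked in this file; the authoritative typedefs live in
disasm.h. my_print(), my_call(), my_imm() and dump_xlated() are hypothetical
stand-ins, e.g. my_call() could be backed by the kallsyms lookup sketched earlier
and my_imm() by the map ids reported in bpf_prog_info.

  #include <stdarg.h>
  #include <stdbool.h>
  #include <stdio.h>

  #include <linux/bpf.h>

  #include "disasm.h"

  /* Print callback: just forward the format string to stdout. */
  static void my_print(struct bpf_verifier_env *env, const char *fmt, ...)
  {
  	va_list args;

  	va_start(args, fmt);
  	vprintf(fmt, args);
  	va_end(args);
  }

  /* Call resolver: would map insn->imm to a helper name, e.g. via kallsyms. */
  static const char *my_call(void *private_data, const struct bpf_insn *insn)
  {
  	return "bpf_map_lookup_elem";	/* placeholder result */
  }

  /* Immediate resolver: would map a ldimm64 value back to "map[id:N]". */
  static const char *my_imm(void *private_data, const struct bpf_insn *insn,
  			  u64 full_imm)
  {
  	return "map[id:2]";		/* placeholder result */
  }

  static void dump_xlated(const struct bpf_insn *insn, unsigned int cnt)
  {
  	const struct bpf_insn_cbs cbs = {
  		.cb_print	= my_print,
  		.cb_call	= my_call,
  		.cb_imm		= my_imm,
  		.private_data	= NULL,
  	};
  	unsigned int i;

  	for (i = 0; i < cnt; i++) {
  		my_print(NULL, "%u: ", i);
  		print_bpf_insn(&cbs, NULL, &insn[i], true);
  		/* ldimm64 occupies two insn slots; skip the second half */
  		if (insn[i].code == (BPF_LD | BPF_IMM | BPF_DW))
  			i++;
  	}
  }

The ldimm64 skip mirrors the fact that a 64-bit immediate load occupies two insn
slots, which is why the dumps in the commit message jump from index 4 to 6.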