Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Alexei Starovoitov says:

====================
pull-request: bpf 2020-08-21

The following pull-request contains BPF updates for your *net* tree.

We've added 11 non-merge commits during the last 5 day(s) which contain
a total of 12 files changed, 78 insertions(+), 24 deletions(-).

The main changes are:

1) three fixes in BPF task iterator logic, from Yonghong.

2) fix for compressed dwarf sections in vmlinux, from Jiri.

3) fix xdp attach regression, from Andrii.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 4af7b32f84
@@ -767,7 +767,7 @@ union bpf_attr {
  *
  * Also, note that **bpf_trace_printk**\ () is slow, and should
  * only be used for debugging purposes. For this reason, a notice
- * bloc (spanning several lines) is printed to kernel logs and
+ * block (spanning several lines) is printed to kernel logs and
  * states that the helper should not be used "for production use"
  * the first time this helper is used (or more precisely, when
  * **trace_printk**\ () buffers are allocated). For passing values
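The hunk above only fixes a typo in the documentation of **bpf_trace_printk**\ (), which the text describes as a slow, debugging-only helper. As a rough illustration that is not part of this patch, a minimal BPF program emitting such debug output through the bpf_printk() convenience macro from bpf_helpers.h could look like the sketch below; the tracepoint, function name, and message are made up for the example.

/* Illustrative debugging-only sketch, not part of the patch above.
 * Build with: clang -O2 -g -target bpf -c trace_execve.bpf.c
 * Output appears in /sys/kernel/debug/tracing/trace_pipe.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tracepoint/syscalls/sys_enter_execve")
int trace_execve(void *ctx)
{
        /* bpf_printk() expands to bpf_trace_printk(fmt, sizeof(fmt), ...) */
        bpf_printk("execve observed");
        return 0;
}

char LICENSE[] SEC("license") = "GPL";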
@@ -1033,14 +1033,14 @@ union bpf_attr {
  *
  *		int ret;
  *		struct bpf_tunnel_key key = {};
- *
+ *
  *		ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
  *		if (ret < 0)
  *			return TC_ACT_SHOT;	// drop packet
- *
+ *
  *		if (key.remote_ipv4 != 0x0a000001)
  *			return TC_ACT_SHOT;	// drop packet
- *
+ *
  *		return TC_ACT_OK;		// accept packet
  *
  * This interface can also be used with all encapsulation devices
@@ -1147,7 +1147,7 @@ union bpf_attr {
  * 	Description
  * 		Retrieve the realm or the route, that is to say the
  * 		**tclassid** field of the destination for the *skb*. The
- * 		indentifier retrieved is a user-provided tag, similar to the
+ * 		identifier retrieved is a user-provided tag, similar to the
  * 		one used with the net_cls cgroup (see description for
  * 		**bpf_get_cgroup_classid**\ () helper), but here this tag is
  * 		held by a route (a destination entry), not by a task.
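For context on the **bpf_get_route_realm**\ () description fixed above, a hedged sketch of a tc classifier using the helper might look as follows. The realm number 42 and the drop policy are invented for illustration, and CONFIG_IP_ROUTE_CLASSID must be enabled for the helper to return a non-zero tag.

/* Illustrative sketch, not part of this patch. */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int realm_filter(struct __sk_buff *skb)
{
        /* Tag of the destination route (dst->tclassid), 0 if unset. */
        __u32 realm = bpf_get_route_realm(skb);

        if (realm == 42)                /* arbitrary example realm */
                return TC_ACT_SHOT;     /* drop */
        return TC_ACT_OK;               /* accept */
}

char LICENSE[] SEC("license") = "GPL";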
@@ -67,6 +67,9 @@ static void bpf_iter_done_stop(struct seq_file *seq)
 	iter_priv->done_stop = true;
 }
 
+/* maximum visited objects before bailing out */
+#define MAX_ITER_OBJECTS 1000000
+
 /* bpf_seq_read, a customized and simpler version for bpf iterator.
  * no_llseek is assumed for this file.
  * The following are differences from seq_read():
@@ -79,7 +82,7 @@ static ssize_t bpf_seq_read(struct file *file, char __user *buf, size_t size,
 {
 	struct seq_file *seq = file->private_data;
 	size_t n, offs, copied = 0;
-	int err = 0;
+	int err = 0, num_objs = 0;
 	void *p;
 
 	mutex_lock(&seq->lock);
@@ -135,6 +138,7 @@ static ssize_t bpf_seq_read(struct file *file, char __user *buf, size_t size,
 	while (1) {
 		loff_t pos = seq->index;
 
+		num_objs++;
 		offs = seq->count;
 		p = seq->op->next(seq, p, &seq->index);
 		if (pos == seq->index) {
@@ -153,6 +157,15 @@ static ssize_t bpf_seq_read(struct file *file, char __user *buf, size_t size,
 		if (seq->count >= size)
 			break;
 
+		if (num_objs >= MAX_ITER_OBJECTS) {
+			if (offs == 0) {
+				err = -EAGAIN;
+				seq->op->stop(seq, p);
+				goto done;
+			}
+			break;
+		}
+
 		err = seq->op->show(seq, p);
 		if (err > 0) {
 			bpf_iter_dec_seq_num(seq);
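With the MAX_ITER_OBJECTS bail-out above, a read() on a BPF iterator FD can now fail with -EAGAIN when the kernel gives up before producing any output for that call. A hedged user-space sketch of the resulting retry pattern (mirroring the bpftool change further down in this pull) is shown below; drain_iter() and the buffer size are illustrative, and iter_fd is assumed to come from bpf_iter_create() on an iterator link.

/* Illustrative user-space sketch, not part of the patch. */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int drain_iter(int iter_fd)
{
        char buf[4096];
        ssize_t n;

        for (;;) {
                n = read(iter_fd, buf, sizeof(buf));
                if (n < 0) {
                        if (errno == EAGAIN)
                                continue;       /* iterator bailed out, retry */
                        return -errno;
                }
                if (n == 0)
                        return 0;               /* iterator finished */
                fwrite(buf, 1, n, stdout);      /* consume the output */
        }
}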
@@ -29,8 +29,9 @@ static struct task_struct *task_seq_get_next(struct pid_namespace *ns,
 
 	rcu_read_lock();
 retry:
-	pid = idr_get_next(&ns->idr, tid);
+	pid = find_ge_pid(*tid, ns);
 	if (pid) {
+		*tid = pid_nr_ns(pid, ns);
 		task = get_pid_task(pid, PIDTYPE_PID);
 		if (!task) {
 			++*tid;
@@ -178,10 +179,11 @@ again:
 		f = fcheck_files(curr_files, curr_fd);
 		if (!f)
 			continue;
+		if (!get_file_rcu(f))
+			continue;
 
 		/* set info->fd */
 		info->fd = curr_fd;
-		get_file(f);
 		rcu_read_unlock();
 		return f;
 	}
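The two hunks above harden the kernel side of the task and task_file iterators. For reference only, a hedged sketch of the kind of BPF iterator program they serve is given below; it is not part of the patch, assumes a vmlinux.h generated with "bpftool btf dump file /sys/kernel/btf/vmlinux format c", and simply prints each task's PID.

/* Illustrative sketch, not part of the patch. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("iter/task")
int dump_task(struct bpf_iter__task *ctx)
{
        struct seq_file *seq = ctx->meta->seq;
        struct task_struct *task = ctx->task;
        static const char fmt[] = "%llu\n";
        __u64 pid;

        if (!task)
                return 0;

        pid = task->pid;
        /* bpf_seq_printf() takes its arguments as a packed data array. */
        bpf_seq_printf(seq, fmt, sizeof(fmt), &pid, sizeof(pid));
        return 0;
}

char LICENSE[] SEC("license") = "GPL";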
@@ -8742,13 +8742,15 @@ struct bpf_xdp_link {
 	int flags;
 };
 
-static enum bpf_xdp_mode dev_xdp_mode(u32 flags)
+static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags)
 {
 	if (flags & XDP_FLAGS_HW_MODE)
 		return XDP_MODE_HW;
 	if (flags & XDP_FLAGS_DRV_MODE)
 		return XDP_MODE_DRV;
-	return XDP_MODE_SKB;
+	if (flags & XDP_FLAGS_SKB_MODE)
+		return XDP_MODE_SKB;
+	return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB;
 }
 
 static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode)
@@ -8896,7 +8898,7 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack
 		return -EINVAL;
 	}
 
-	mode = dev_xdp_mode(flags);
+	mode = dev_xdp_mode(dev, flags);
 	/* can't replace attached link */
 	if (dev_xdp_link(dev, mode)) {
 		NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link");
@@ -8984,7 +8986,7 @@ static int dev_xdp_detach_link(struct net_device *dev,
 
 	ASSERT_RTNL();
 
-	mode = dev_xdp_mode(link->flags);
+	mode = dev_xdp_mode(dev, link->flags);
 	if (dev_xdp_link(dev, mode) != link)
 		return -EINVAL;
 
@@ -9080,7 +9082,7 @@ static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog,
 		goto out_unlock;
 	}
 
-	mode = dev_xdp_mode(xdp_link->flags);
+	mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags);
 	bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode);
 	err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL,
 			      xdp_link->flags, new_prog);
@@ -9164,7 +9166,7 @@ out_put_dev:
 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
 		      int fd, int expected_fd, u32 flags)
 {
-	enum bpf_xdp_mode mode = dev_xdp_mode(flags);
+	enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags);
 	struct bpf_prog *new_prog = NULL, *old_prog = NULL;
 	int err;
 
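The dev_xdp_mode() change above means that, when no mode flag is given, the kernel now checks whether the device implements ndo_bpf and prefers native (driver) mode, falling back to generic SKB mode otherwise. From user space the mode can still be forced with the XDP_FLAGS_* attach flags; a hedged sketch of that is below, where attach_xdp(), prog_fd, and the interface name are made up for illustration and bpf_set_link_xdp_fd() is the libbpf attach call of that era.

/* Illustrative sketch, not part of the patch. */
#include <stdbool.h>
#include <net/if.h>
#include <linux/if_link.h>
#include <bpf/libbpf.h>

static int attach_xdp(int prog_fd, const char *ifname, bool force_skb_mode)
{
        __u32 flags = force_skb_mode ? XDP_FLAGS_SKB_MODE : 0;
        int ifindex = if_nametoindex(ifname);

        if (!ifindex)
                return -1;

        /* With flags == 0 the kernel now picks native mode when the device
         * has ndo_bpf and generic SKB mode otherwise, as in dev_xdp_mode()
         * above.
         */
        return bpf_set_link_xdp_fd(ifindex, prog_fd, flags);
}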
@@ -134,6 +134,8 @@ int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type)
 	while (true) {
 		ret = read(fd, buf, sizeof(buf));
 		if (ret < 0) {
+			if (errno == EAGAIN)
+				continue;
 			err = -errno;
 			p_err("failed to read PID iterator output: %d", err);
 			goto out;
@@ -233,6 +233,39 @@ static struct btf_id *add_symbol(struct rb_root *root, char *name, size_t size)
 	return btf_id__add(root, id, false);
 }
 
+/*
+ * The data of compressed section should be aligned to 4
+ * (for 32bit) or 8 (for 64 bit) bytes. The binutils ld
+ * sets sh_addralign to 1, which makes libelf fail with
+ * misaligned section error during the update:
+ *    FAILED elf_update(WRITE): invalid section alignment
+ *
+ * While waiting for ld fix, we fix the compressed sections
+ * sh_addralign value manualy.
+ */
+static int compressed_section_fix(Elf *elf, Elf_Scn *scn, GElf_Shdr *sh)
+{
+	int expected = gelf_getclass(elf) == ELFCLASS32 ? 4 : 8;
+
+	if (!(sh->sh_flags & SHF_COMPRESSED))
+		return 0;
+
+	if (sh->sh_addralign == expected)
+		return 0;
+
+	pr_debug2(" - fixing wrong alignment sh_addralign %u, expected %u\n",
+		  sh->sh_addralign, expected);
+
+	sh->sh_addralign = expected;
+
+	if (gelf_update_shdr(scn, sh) == 0) {
+		printf("FAILED cannot update section header: %s\n",
+		       elf_errmsg(-1));
+		return -1;
+	}
+	return 0;
+}
+
 static int elf_collect(struct object *obj)
 {
 	Elf_Scn *scn = NULL;
@@ -309,6 +342,9 @@ static int elf_collect(struct object *obj)
 			obj->efile.idlist_shndx = idx;
 			obj->efile.idlist_addr = sh.sh_addr;
 		}
+
+		if (compressed_section_fix(elf, scn, &sh))
+			return -1;
 	}
 
 	return 0;
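The two hunks above work around binutils ld leaving sh_addralign at 1 on SHF_COMPRESSED sections. As a hedged, stand-alone diagnostic that is not part of the patch, the misalignment can be observed with a small libelf program along the lines of the sketch below; the file name is taken from argv and everything else is generic libelf usage.

/* Illustrative sketch, not part of the patch. Build with -lelf. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <elf.h>
#include <gelf.h>

int main(int argc, char **argv)
{
        Elf_Scn *scn = NULL;
        GElf_Shdr sh;
        Elf *elf;
        int fd;

        if (argc != 2 || elf_version(EV_CURRENT) == EV_NONE)
                return 1;
        fd = open(argv[1], O_RDONLY);
        if (fd < 0)
                return 1;
        elf = elf_begin(fd, ELF_C_READ, NULL);
        if (!elf)
                return 1;

        /* Print the alignment of every compressed section. */
        while ((scn = elf_nextscn(elf, scn)) != NULL) {
                if (!gelf_getshdr(scn, &sh))
                        continue;
                if (sh.sh_flags & SHF_COMPRESSED)
                        printf("compressed section, sh_addralign = %llu\n",
                               (unsigned long long)sh.sh_addralign);
        }

        elf_end(elf);
        close(fd);
        return 0;
}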
@@ -767,7 +767,7 @@ union bpf_attr {
  *
  * Also, note that **bpf_trace_printk**\ () is slow, and should
  * only be used for debugging purposes. For this reason, a notice
- * bloc (spanning several lines) is printed to kernel logs and
+ * block (spanning several lines) is printed to kernel logs and
  * states that the helper should not be used "for production use"
  * the first time this helper is used (or more precisely, when
  * **trace_printk**\ () buffers are allocated). For passing values
@@ -1033,14 +1033,14 @@ union bpf_attr {
  *
  *		int ret;
  *		struct bpf_tunnel_key key = {};
- *
+ *
  *		ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
  *		if (ret < 0)
  *			return TC_ACT_SHOT;	// drop packet
- *
+ *
  *		if (key.remote_ipv4 != 0x0a000001)
  *			return TC_ACT_SHOT;	// drop packet
- *
+ *
  *		return TC_ACT_OK;		// accept packet
  *
  * This interface can also be used with all encapsulation devices
@@ -1147,7 +1147,7 @@ union bpf_attr {
  * 	Description
  * 		Retrieve the realm or the route, that is to say the
  * 		**tclassid** field of the destination for the *skb*. The
- * 		indentifier retrieved is a user-provided tag, similar to the
+ * 		identifier retrieved is a user-provided tag, similar to the
  * 		one used with the net_cls cgroup (see description for
  * 		**bpf_get_cgroup_classid**\ () helper), but here this tag is
  * 		held by a route (a destination entry), not by a task.
@@ -879,7 +879,7 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
 			btf_dump_printf(d, ": %d", m_sz);
 			off = m_off + m_sz;
 		} else {
-			m_sz = max(0LL, btf__resolve_size(d->btf, m->type));
+			m_sz = max((__s64)0, btf__resolve_size(d->btf, m->type));
 			off = m_off + m_sz * 8;
 		}
 		btf_dump_printf(d, ";");
@@ -2264,7 +2264,7 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
 	data = elf_getdata(scn, NULL);
 	if (!scn || !data) {
 		pr_warn("failed to get Elf_Data from map section %d (%s)\n",
-			obj->efile.maps_shndx, MAPS_ELF_SEC);
+			obj->efile.btf_maps_shndx, MAPS_ELF_SEC);
 		return -EINVAL;
 	}
 
tools/testing/selftests/bpf/.gitignore (vendored)
@@ -6,7 +6,6 @@ test_lpm_map
 test_tag
 FEATURE-DUMP.libbpf
 fixdep
-test_align
 test_dev_cgroup
 /test_progs*
 test_tcpbpf_user
@@ -32,7 +32,7 @@ LDLIBS += -lcap -lelf -lz -lrt -lpthread
 
 # Order correspond to 'make run_tests' order
 TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
-	test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \
+	test_verifier_log test_dev_cgroup test_tcpbpf_user \
 	test_sock test_btf test_sockmap get_cgroup_id_user test_socket_cookie \
 	test_cgroup_storage \
 	test_netcnt test_tcpnotify_user test_sock_fields test_sysctl \
@@ -19,7 +19,7 @@ static int libbpf_debug_print(enum libbpf_print_level level,
 	log_buf = va_arg(args, char *);
 	if (!log_buf)
 		goto out;
-	if (strstr(log_buf, err_str) == 0)
+	if (err_str && strstr(log_buf, err_str) == 0)
 		found = true;
 out:
 	printf(format, log_buf);