Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:

====================
pull-request: bpf 2018-04-21

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix a deadlock between mm->mmap_sem and bpf_event_mutex when one task
   is detaching a BPF prog via perf_event_detach_bpf_prog() and another
   one dumping through bpf_prog_array_copy_info(). For the latter we move
   the copy_to_user() out of the bpf_event_mutex lock to fix it, from
   Yonghong.

2) Fix test_sock and test_sock_addr.sh failures. The former was hitting
   rlimit issues and the latter required ping to specify the address
   family, from Yonghong.

3) Remove a dead check in sockmap's sock_map_alloc(), from Jann.

4) Add generated files to BPF kselftests gitignore that were previously
   missed, from Anders.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 986e54cd68
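Note on 1): the deadlock is the classic AB-BA ordering problem. The dump path held bpf_event_mutex across copy_to_user(), which can fault and acquire mm->mmap_sem; the detach path can already hold mmap_sem when it needs bpf_event_mutex, inverting the order. The fix pattern below — fill a kernel buffer under the lock, copy to userspace only after dropping it — can be illustrated with a userspace pthreads analogue. This is a sketch of the locking discipline only, not the kernel code; all names and sizes are invented:

    /*
     * Userspace analogue of the fix, NOT the kernel code: snapshot the
     * shared state into a private buffer while the mutex is held, then do
     * the potentially-blocking copy after dropping it, so the mutex never
     * nests inside any lock the copy itself might take.
     */
    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_PROGS 16

    static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;
    static uint32_t shared_ids[MAX_PROGS] = { 42 };
    static uint32_t shared_cnt = 1;

    static int query_prog_ids(uint32_t *out, uint32_t request_cnt,
                              uint32_t *out_cnt)
    {
            uint32_t tmp[MAX_PROGS];
            uint32_t cnt;

            pthread_mutex_lock(&event_mutex);
            cnt = shared_cnt < request_cnt ? shared_cnt : request_cnt;
            memcpy(tmp, shared_ids, cnt * sizeof(tmp[0])); /* fill under lock */
            pthread_mutex_unlock(&event_mutex);

            /* the "copy_to_user" equivalent happens with no lock held */
            memcpy(out, tmp, cnt * sizeof(tmp[0]));
            *out_cnt = cnt;
            return 0;
    }

    int main(void)
    {
            uint32_t ids[MAX_PROGS], n;

            query_prog_ids(ids, MAX_PROGS, &n);
            printf("%u prog(s), first id %u\n", n, ids[0]);
            return 0;
    }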
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -339,8 +339,8 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
 void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
                                 struct bpf_prog *old_prog);
 int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
-                             __u32 __user *prog_ids, u32 request_cnt,
-                             __u32 __user *prog_cnt);
+                             u32 *prog_ids, u32 request_cnt,
+                             u32 *prog_cnt);
 int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
                         struct bpf_prog *exclude_prog,
                         struct bpf_prog *include_prog,
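The `__user` annotations disappear from this signature because prog_ids and prog_cnt now point at kernel buffers: the copy_to_user() into the caller's buffers is done by the caller itself after dropping bpf_event_mutex (see the kernel/trace/bpf_trace.c hunk below).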
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1572,13 +1572,32 @@ int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
         return cnt;
 }
 
+static bool bpf_prog_array_copy_core(struct bpf_prog **prog,
+                                     u32 *prog_ids,
+                                     u32 request_cnt)
+{
+        int i = 0;
+
+        for (; *prog; prog++) {
+                if (*prog == &dummy_bpf_prog.prog)
+                        continue;
+                prog_ids[i] = (*prog)->aux->id;
+                if (++i == request_cnt) {
+                        prog++;
+                        break;
+                }
+        }
+
+        return !!(*prog);
+}
+
 int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
                                 __u32 __user *prog_ids, u32 cnt)
 {
         struct bpf_prog **prog;
         unsigned long err = 0;
-        u32 i = 0, *ids;
         bool nospc;
+        u32 *ids;
 
         /* users of this function are doing:
          * cnt = bpf_prog_array_length();
@@ -1595,16 +1614,7 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
                 return -ENOMEM;
         rcu_read_lock();
         prog = rcu_dereference(progs)->progs;
-        for (; *prog; prog++) {
-                if (*prog == &dummy_bpf_prog.prog)
-                        continue;
-                ids[i] = (*prog)->aux->id;
-                if (++i == cnt) {
-                        prog++;
-                        break;
-                }
-        }
-        nospc = !!(*prog);
+        nospc = bpf_prog_array_copy_core(prog, ids, cnt);
         rcu_read_unlock();
         err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
         kfree(ids);
@@ -1683,22 +1693,25 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
 }
 
 int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
-                             __u32 __user *prog_ids, u32 request_cnt,
-                             __u32 __user *prog_cnt)
+                             u32 *prog_ids, u32 request_cnt,
+                             u32 *prog_cnt)
 {
+        struct bpf_prog **prog;
         u32 cnt = 0;
 
         if (array)
                 cnt = bpf_prog_array_length(array);
 
-        if (copy_to_user(prog_cnt, &cnt, sizeof(cnt)))
-                return -EFAULT;
+        *prog_cnt = cnt;
 
         /* return early if user requested only program count or nothing to copy */
         if (!request_cnt || !cnt)
                 return 0;
 
-        return bpf_prog_array_copy_to_user(array, prog_ids, request_cnt);
+        /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
+        prog = rcu_dereference_check(array, 1)->progs;
+        return bpf_prog_array_copy_core(prog, prog_ids, request_cnt) ? -ENOSPC
+                                                                     : 0;
 }
 
 static void bpf_prog_free_deferred(struct work_struct *work)
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -1442,9 +1442,6 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
             attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
                 return ERR_PTR(-EINVAL);
 
-        if (attr->value_size > KMALLOC_MAX_SIZE)
-                return ERR_PTR(-E2BIG);
-
         err = bpf_tcp_ulp_register();
         if (err && err != -EEXIST)
                 return ERR_PTR(err);
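The removed branch was unreachable: the check directly above already returns -EINVAL for any attr->value_size != 4, so value_size can never exceed KMALLOC_MAX_SIZE by the time the removed comparison ran. That is the "dead check" referenced in item 3) of the pull request.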
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -977,6 +977,7 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
 {
         struct perf_event_query_bpf __user *uquery = info;
         struct perf_event_query_bpf query = {};
+        u32 *ids, prog_cnt, ids_len;
         int ret;
 
         if (!capable(CAP_SYS_ADMIN))
@@ -985,16 +986,32 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
                 return -EINVAL;
         if (copy_from_user(&query, uquery, sizeof(query)))
                 return -EFAULT;
-        if (query.ids_len > BPF_TRACE_MAX_PROGS)
+
+        ids_len = query.ids_len;
+        if (ids_len > BPF_TRACE_MAX_PROGS)
                 return -E2BIG;
+        ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
+        if (!ids)
+                return -ENOMEM;
+        /*
+         * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
+         * is required when user only wants to check for uquery->prog_cnt.
+         * There is no need to check for it since the case is handled
+         * gracefully in bpf_prog_array_copy_info.
+         */
 
         mutex_lock(&bpf_event_mutex);
         ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
-                                       uquery->ids,
-                                       query.ids_len,
-                                       &uquery->prog_cnt);
+                                       ids,
+                                       ids_len,
+                                       &prog_cnt);
         mutex_unlock(&bpf_event_mutex);
 
+        if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
+            copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
+                ret = -EFAULT;
+
+        kfree(ids);
         return ret;
 }
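For context, perf_event_query_prog_array() services the PERF_EVENT_IOC_QUERY_BPF perf ioctl. A minimal userspace sketch of a caller follows; it assumes perf_fd is an already-opened perf event fd with BPF programs attached, and the 16-slot sizing is an arbitrary illustrative choice:

    /*
     * Hypothetical caller of the PERF_EVENT_IOC_QUERY_BPF ioctl handled
     * by perf_event_query_prog_array() above. perf_fd and the 16-slot
     * sizing are assumptions for illustration.
     */
    #include <linux/perf_event.h>
    #include <sys/ioctl.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int query_attached_bpf_progs(int perf_fd)
    {
            struct perf_event_query_bpf *query;
            __u32 i, ids_len = 16;

            query = calloc(1, sizeof(*query) + ids_len * sizeof(__u32));
            if (!query)
                    return -1;
            query->ids_len = ids_len;

            /* kernel fills prog_cnt and up to ids_len program ids */
            if (ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, query)) {
                    perror("PERF_EVENT_IOC_QUERY_BPF");
                    free(query);
                    return -1;
            }

            for (i = 0; i < query->prog_cnt && i < ids_len; i++)
                    printf("attached BPF prog id: %u\n", query->ids[i]);

            free(query);
            return 0;
    }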
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -12,3 +12,6 @@ test_tcpbpf_user
 test_verifier_log
 feature
 test_libbpf_open
+test_sock
+test_sock_addr
+urandom_read
--- a/tools/testing/selftests/bpf/test_sock.c
+++ b/tools/testing/selftests/bpf/test_sock.c
@@ -13,6 +13,7 @@
 #include <bpf/bpf.h>
 
 #include "cgroup_helpers.h"
+#include "bpf_rlimit.h"
 
 #ifndef ARRAY_SIZE
 # define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
--- a/tools/testing/selftests/bpf/test_sock_addr.c
+++ b/tools/testing/selftests/bpf/test_sock_addr.c
@@ -15,6 +15,7 @@
 #include <bpf/libbpf.h>
 
 #include "cgroup_helpers.h"
+#include "bpf_rlimit.h"
 
 #define CG_PATH "/foo"
 #define CONNECT4_PROG_PATH "./connect4_prog.o"
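The added bpf_rlimit.h include is what addresses the rlimit failures from item 2). The header itself is not part of this diff, but the pattern it provides is a constructor that lifts RLIMIT_MEMLOCK before main() runs; a minimal sketch of that idea (exact contents assumed, not shown in this commit):

    /*
     * Sketch of the bpf_rlimit.h idea: raise RLIMIT_MEMLOCK before main()
     * runs so BPF prog/map loads in the tests don't fail on the default
     * locked-memory limit. Details are assumptions, not this commit's code.
     */
    #include <sys/resource.h>
    #include <stdio.h>

    static __attribute__((constructor)) void bpf_rlimit_ctor(void)
    {
            struct rlimit rlim_new = {
                    .rlim_cur = RLIM_INFINITY,
                    .rlim_max = RLIM_INFINITY,
            };

            if (setrlimit(RLIMIT_MEMLOCK, &rlim_new))
                    perror("Unable to lift memlock rlimit");
    }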
--- a/tools/testing/selftests/bpf/test_sock_addr.sh
+++ b/tools/testing/selftests/bpf/test_sock_addr.sh
@@ -4,7 +4,7 @@ set -eu
 
 ping_once()
 {
-        ping -q -c 1 -W 1 ${1%%/*} >/dev/null 2>&1
+        ping -${1} -q -c 1 -W 1 ${2%%/*} >/dev/null 2>&1
 }
 
 wait_for_ip()
@@ -13,7 +13,7 @@ wait_for_ip()
         echo -n "Wait for testing IPv4/IPv6 to become available "
         for _i in $(seq ${MAX_PING_TRIES}); do
                 echo -n "."
-                if ping_once ${TEST_IPv4} && ping_once ${TEST_IPv6}; then
+                if ping_once 4 ${TEST_IPv4} && ping_once 6 ${TEST_IPv6}; then
                         echo " OK"
                         return
                 fi
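ping_once() now takes the address family as its first argument and passes it explicitly (ping -4 / ping -6) instead of letting the binary infer it from the destination; this is the "required ping to specify the address family" fix from item 2), since a plain ping invocation does not accept an IPv6 destination on all systems.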