Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Fun set of conflict resolutions here...
For the mac80211 stuff, these were fortunately just parallel
adds. Trivially resolved.
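(A parallel add is the easy case: both branches insert different new
code at the same spot, so the resolution simply keeps both sides of
the conflict markers. Schematically, with hypothetical names:

	<<<<<<< HEAD
	static int feature_added_in_net_next(void);
	=======
	static int fix_added_in_net(void);
	>>>>>>> net

becomes both declarations, one after the other.)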
In drivers/net/phy/phy.c we had a bug fix in 'net' that moved the
function phy_disable_interrupts() earlier in the file, whilst in
'net-next' the phy_error() call from this function was removed.
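The merged result keeps the function at its new, earlier location
while dropping the phy_error() call, so errors are simply returned to
the caller. A sketch of what that plausibly looks like (based on the
surrounding 4.16-era helpers, not a verbatim copy of the resolution):

	static int phy_disable_interrupts(struct phy_device *phydev)
	{
		int err;

		/* Disable PHY interrupts */
		err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
		if (err)
			return err;

		/* Clear the interrupt; no phy_error() on failure any
		 * more, the caller handles the return value.
		 */
		return phy_clear_interrupt(phydev);
	}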
In net/ipv4/xfrm4_policy.c, David Ahern's changes to remove the
'rt_table_id' member of rtable collided with a bug fix in 'net' that
added a new struct member "rt_mtu_locked" which needs to be copied
over here.
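Concretely, in the merged xfrm4_fill_dst() the rt_table_id copy goes
away while the new field is carried over; a sketch of the relevant
assignments (neighbouring field copies shown for context, from the
4.16-era code):

	xdst->u.rt.rt_gateway = rt->rt_gateway;
	xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway;
	xdst->u.rt.rt_pmtu = rt->rt_pmtu;
	/* 'net' bug fix: the MTU-locked state must be copied too */
	xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked;
	/* the pre-merge rt_table_id copy is simply gone */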
The mlxsw driver conflict consisted of net-next separating
the span code and definitions into separate files, whilst
a 'net' bug fix made some changes to that moved code.
The mlx5 infiniband conflict resolution was quite non-trivial; the
RDMA tree's merge commit was used as a guide here, and here are
their notes:
====================
Because bug fixes found by the syzkaller bot were taken into the for-rc
branch after development for the 4.17 merge window had already started
flowing into the for-next branch, there were fairly non-trivial merge
conflicts between the for-rc branch and the for-next branch. This merge
resolves those conflicts and provides a unified base upon which ongoing
development for 4.17 can be based.
Conflicts:
drivers/infiniband/hw/mlx5/main.c - Commit 42cea83f95
(IB/mlx5: Fix cleanup order on unload), added to for-rc, and
commit b5ca15ad7e (IB/mlx5: Add proper representors support),
added as part of the devel cycle, both needed to modify the
init/de-init functions used by mlx5. To support the new
representors, the new functions added by the cleanup patch
needed to be made non-static, and the init/de-init list
added by the representors patch needed to be modified to
match the init/de-init list changes made by the cleanup
patch.
Updates:
drivers/infiniband/hw/mlx5/mlx5_ib.h - Update function
prototypes added by representors patch to reflect new function
names as changed by cleanup patch
drivers/infiniband/hw/mlx5/ib_rep.c - Update init/de-init
stage list to match new order from cleanup patch
====================
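The init/de-init lists mentioned in the notes above follow mlx5's
staged-profile pattern, roughly as sketched below (stage entries
abbreviated and illustrative, not the full lists from main.c and
ib_rep.c):

	/* Each stage pairs an init function with its cleanup
	 * counterpart. The representors profile must name the same
	 * stage functions, now non-static, in the order established
	 * by the cleanup patch.
	 */
	static const struct mlx5_ib_profile rep_profile = {
		STAGE_CREATE(MLX5_IB_STAGE_INIT,
			     mlx5_ib_stage_init_init,
			     mlx5_ib_stage_init_cleanup),
		STAGE_CREATE(MLX5_IB_STAGE_CAPS,
			     mlx5_ib_stage_caps_init,
			     NULL),
		/* ... remaining stages in cleanup-patch order ... */
	};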
Signed-off-by: David S. Miller <davem@davemloft.net>
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1849,7 +1849,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
 	union bpf_attr attr = {};
 	int err;
 
-	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
+	if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
 	err = check_uarg_tail_zero(uattr, sizeof(attr), size);
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -3183,6 +3183,16 @@ static int cgroup_enable_threaded(struct cgroup *cgrp)
 	if (cgroup_is_threaded(cgrp))
 		return 0;
 
+	/*
+	 * If @cgroup is populated or has domain controllers enabled, it
+	 * can't be switched. While the below cgroup_can_be_thread_root()
+	 * test can catch the same conditions, that's only when @parent is
+	 * not mixable, so let's check it explicitly.
+	 */
+	if (cgroup_is_populated(cgrp) ||
+	    cgrp->subtree_control & ~cgrp_dfl_threaded_ss_mask)
+		return -EOPNOTSUPP;
+
 	/* we're joining the parent's domain, ensure its validity */
 	if (!cgroup_is_valid_domain(dom_cgrp) ||
 	    !cgroup_can_be_thread_root(dom_cgrp))
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -488,25 +488,6 @@ get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat)
 }
 EXPORT_SYMBOL_GPL(get_compat_sigset);
 
-int
-put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set,
-		  unsigned int size)
-{
-	/* size <= sizeof(compat_sigset_t) <= sizeof(sigset_t) */
-#ifdef __BIG_ENDIAN
-	compat_sigset_t v;
-	switch (_NSIG_WORDS) {
-	case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
-	case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
-	case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
-	case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0];
-	}
-	return copy_to_user(compat, &v, size) ? -EFAULT : 0;
-#else
-	return copy_to_user(compat, set, size) ? -EFAULT : 0;
-#endif
-}
-
 #ifdef CONFIG_NUMA
 COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
 		       compat_uptr_t __user *, pages32,
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2246,7 +2246,7 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
 			struct perf_event_context *task_ctx,
 			enum event_type_t event_type)
 {
-	enum event_type_t ctx_event_type = event_type & EVENT_ALL;
+	enum event_type_t ctx_event_type;
 	bool cpu_event = !!(event_type & EVENT_CPU);
 
 	/*
@@ -2256,6 +2256,8 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
 	if (event_type & EVENT_PINNED)
 		event_type |= EVENT_FLEXIBLE;
 
+	ctx_event_type = event_type & EVENT_ALL;
+
 	perf_pmu_disable(cpuctx->ctx.pmu);
 	if (task_ctx)
 		task_ctx_sched_out(cpuctx, task_ctx, event_type);
--- a/kernel/fail_function.c
+++ b/kernel/fail_function.c
@@ -14,6 +14,15 @@
 
 static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs);
 
+static void fei_post_handler(struct kprobe *kp, struct pt_regs *regs,
+			     unsigned long flags)
+{
+	/*
+	 * A dummy post handler is required to prohibit optimizing, because
+	 * jump optimization does not support execution path overriding.
+	 */
+}
+
 struct fei_attr {
 	struct list_head list;
 	struct kprobe kp;
@@ -56,6 +65,7 @@ static struct fei_attr *fei_attr_new(const char *sym, unsigned long addr)
 			return NULL;
 		}
 		attr->kp.pre_handler = fei_kprobe_handler;
+		attr->kp.post_handler = fei_post_handler;
 		attr->retval = adjust_error_retval(addr, 0);
 		INIT_LIST_HEAD(&attr->list);
 	}
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -373,7 +373,8 @@ static void __jump_label_update(struct static_key *key,
 			if (kernel_text_address(entry->code))
 				arch_jump_label_transform(entry, jump_label_type(entry));
 			else
-				WARN_ONCE(1, "can't patch jump_label at %pS", (void *)entry->code);
+				WARN_ONCE(1, "can't patch jump_label at %pS",
+					  (void *)(unsigned long)entry->code);
 		}
 	}
 }
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1616,11 +1616,12 @@ bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
 void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
 {
 	DEFINE_WAKE_Q(wake_q);
+	unsigned long flags;
 	bool postunlock;
 
-	raw_spin_lock_irq(&lock->wait_lock);
+	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 	postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
-	raw_spin_unlock_irq(&lock->wait_lock);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
 	if (postunlock)
 		rt_mutex_postunlock(&wake_q);
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -427,7 +427,6 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
  err_pfn_remap:
  err_radix:
 	pgmap_radix_release(res, pgoff);
-	devres_free(pgmap);
 	return ERR_PTR(error);
 }
 EXPORT_SYMBOL(devm_memremap_pages);
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -4228,7 +4228,7 @@ static int modules_open(struct inode *inode, struct file *file)
 		m->private = kallsyms_show_value() ? NULL : (void *)8ul;
 	}
 
-	return 0;
+	return err;
 }
 
 static const struct file_operations proc_modules_operations = {
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -640,7 +640,7 @@ device_initcall(register_warn_debugfs);
  */
 __visible void __stack_chk_fail(void)
 {
-	panic("stack-protector: Kernel stack is corrupted in: %p\n",
+	panic("stack-protector: Kernel stack is corrupted in: %pB\n",
 	      __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__stack_chk_fail);
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6683,13 +6683,18 @@ static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
 		parent_quota = parent_b->hierarchical_quota;
 
 		/*
-		 * Ensure max(child_quota) <= parent_quota, inherit when no
+		 * Ensure max(child_quota) <= parent_quota. On cgroup2,
+		 * always take the min. On cgroup1, only inherit when no
 		 * limit is set:
 		 */
-		if (quota == RUNTIME_INF)
-			quota = parent_quota;
-		else if (parent_quota != RUNTIME_INF && quota > parent_quota)
-			return -EINVAL;
+		if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
+			quota = min(quota, parent_quota);
+		} else {
+			if (quota == RUNTIME_INF)
+				quota = parent_quota;
+			else if (parent_quota != RUNTIME_INF && quota > parent_quota)
+				return -EINVAL;
+		}
 	}
 	cfs_b->hierarchical_quota = quota;
 
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -661,32 +661,6 @@ static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
 	.arg3_type	= ARG_ANYTHING,
 };
 
-BPF_CALL_3(bpf_perf_prog_read_value_tp, struct bpf_perf_event_data_kern *, ctx,
-	   struct bpf_perf_event_value *, buf, u32, size)
-{
-	int err = -EINVAL;
-
-	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
-		goto clear;
-	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
-				    &buf->running);
-	if (unlikely(err))
-		goto clear;
-	return 0;
-clear:
-	memset(buf, 0, size);
-	return err;
-}
-
-static const struct bpf_func_proto bpf_perf_prog_read_value_proto_tp = {
-	.func		= bpf_perf_prog_read_value_tp,
-	.gpl_only	= true,
-	.ret_type	= RET_INTEGER,
-	.arg1_type	= ARG_PTR_TO_CTX,
-	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
-	.arg3_type	= ARG_CONST_SIZE,
-};
-
 static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
 {
 	switch (func_id) {
@@ -694,8 +668,6 @@ static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
 		return &bpf_perf_event_output_proto_tp;
 	case BPF_FUNC_get_stackid:
 		return &bpf_get_stackid_proto_tp;
-	case BPF_FUNC_perf_prog_read_value:
-		return &bpf_perf_prog_read_value_proto_tp;
 	default:
 		return tracing_func_proto(func_id);
 	}
@@ -723,6 +695,46 @@ const struct bpf_verifier_ops tracepoint_verifier_ops = {
 const struct bpf_prog_ops tracepoint_prog_ops = {
 };
 
+BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
+	   struct bpf_perf_event_value *, buf, u32, size)
+{
+	int err = -EINVAL;
+
+	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
+		goto clear;
+	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
+				    &buf->running);
+	if (unlikely(err))
+		goto clear;
+	return 0;
+clear:
+	memset(buf, 0, size);
+	return err;
+}
+
+static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
+	.func		= bpf_perf_prog_read_value,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
+	.arg3_type	= ARG_CONST_SIZE,
+};
+
+static const struct bpf_func_proto *pe_prog_func_proto(enum bpf_func_id func_id)
+{
+	switch (func_id) {
+	case BPF_FUNC_perf_event_output:
+		return &bpf_perf_event_output_proto_tp;
+	case BPF_FUNC_get_stackid:
+		return &bpf_get_stackid_proto_tp;
+	case BPF_FUNC_perf_prog_read_value:
+		return &bpf_perf_prog_read_value_proto;
+	default:
+		return tracing_func_proto(func_id);
+	}
+}
+
 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
 				    struct bpf_insn_access_aux *info)
 {
@@ -791,7 +803,7 @@ static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
 }
 
 const struct bpf_verifier_ops perf_event_verifier_ops = {
-	.get_func_proto		= tp_prog_func_proto,
+	.get_func_proto		= pe_prog_func_proto,
 	.is_valid_access	= pe_prog_is_valid_access,
 	.convert_ctx_access	= pe_prog_convert_ctx_access,
 };
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3018,14 +3018,6 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork)
 	return ret;
 }
 
-/*
- * See cancel_delayed_work()
- */
-bool cancel_work(struct work_struct *work)
-{
-	return __cancel_work(work, false);
-}
-
 /**
  * cancel_delayed_work - cancel a delayed work
  * @dwork: delayed_work to cancel
@@ -5337,7 +5329,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
 
 	ret = device_register(&wq_dev->dev);
 	if (ret) {
-		kfree(wq_dev);
+		put_device(&wq_dev->dev);
 		wq->wq_dev = NULL;
 		return ret;
 	}