Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Two minor conflicts: one in the virtio_net driver (a bug fix overlapping the addition of a helper) and one in MAINTAINERS (a new driver edit overlapping the revamp of the PHY entry).

Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -1289,7 +1289,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
info_len = min_t(u32, sizeof(info), info_len);

if (copy_from_user(&info, uinfo, info_len))
return err;
return -EFAULT;

info.type = prog->type;
info.id = prog->aux->id;
@@ -1312,7 +1312,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
}

ulen = info.xlated_prog_len;
info.xlated_prog_len = bpf_prog_size(prog->len);
info.xlated_prog_len = bpf_prog_insn_size(prog);
if (info.xlated_prog_len && ulen) {
uinsns = u64_to_user_ptr(info.xlated_prog_insns);
ulen = min_t(u32, info.xlated_prog_len, ulen);

@@ -1877,10 +1877,12 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
* do our normal operations to the register, we need to set the values
* to the min/max since they are undefined.
*/
if (min_val == BPF_REGISTER_MIN_RANGE)
dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
if (max_val == BPF_REGISTER_MAX_RANGE)
dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
if (opcode != BPF_SUB) {
if (min_val == BPF_REGISTER_MIN_RANGE)
dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
if (max_val == BPF_REGISTER_MAX_RANGE)
dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
}

switch (opcode) {
case BPF_ADD:
@@ -1891,10 +1893,17 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
dst_reg->min_align = min(src_align, dst_align);
break;
case BPF_SUB:
/* If one of our values was at the end of our ranges, then the
* _opposite_ value in the dst_reg goes to the end of our range.
*/
if (min_val == BPF_REGISTER_MIN_RANGE)
dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
if (max_val == BPF_REGISTER_MAX_RANGE)
dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
dst_reg->min_value -= min_val;
dst_reg->min_value -= max_val;
if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
dst_reg->max_value -= max_val;
dst_reg->max_value -= min_val;
dst_reg->min_align = min(src_align, dst_align);
break;
case BPF_MUL:

@@ -33,6 +33,9 @@ struct cgroup_taskset {
struct list_head src_csets;
struct list_head dst_csets;

/* the number of tasks in the set */
int nr_tasks;

/* the subsys currently being processed */
int ssid;

@@ -2006,6 +2006,8 @@ static void cgroup_migrate_add_task(struct task_struct *task,
if (!cset->mg_src_cgrp)
return;

mgctx->tset.nr_tasks++;

list_move_tail(&task->cg_list, &cset->mg_tasks);
if (list_empty(&cset->mg_node))
list_add_tail(&cset->mg_node,
@@ -2094,21 +2096,19 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx)
struct css_set *cset, *tmp_cset;
int ssid, failed_ssid, ret;

/* methods shouldn't be called if no task is actually migrating */
if (list_empty(&tset->src_csets))
return 0;

/* check that we can legitimately attach to the cgroup */
do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
if (ss->can_attach) {
tset->ssid = ssid;
ret = ss->can_attach(tset);
if (ret) {
failed_ssid = ssid;
goto out_cancel_attach;
if (tset->nr_tasks) {
do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
if (ss->can_attach) {
tset->ssid = ssid;
ret = ss->can_attach(tset);
if (ret) {
failed_ssid = ssid;
goto out_cancel_attach;
}
}
}
} while_each_subsys_mask();
} while_each_subsys_mask();
}

/*
* Now that we're guaranteed success, proceed to move all tasks to
@@ -2137,25 +2137,29 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx)
*/
tset->csets = &tset->dst_csets;

do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
if (ss->attach) {
tset->ssid = ssid;
ss->attach(tset);
}
} while_each_subsys_mask();
if (tset->nr_tasks) {
do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
if (ss->attach) {
tset->ssid = ssid;
ss->attach(tset);
}
} while_each_subsys_mask();
}

ret = 0;
goto out_release_tset;

out_cancel_attach:
do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
if (ssid == failed_ssid)
break;
if (ss->cancel_attach) {
tset->ssid = ssid;
ss->cancel_attach(tset);
}
} while_each_subsys_mask();
if (tset->nr_tasks) {
do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
if (ssid == failed_ssid)
break;
if (ss->cancel_attach) {
tset->ssid = ssid;
ss->cancel_attach(tset);
}
} while_each_subsys_mask();
}
out_release_tset:
spin_lock_irq(&css_set_lock);
list_splice_init(&tset->dst_csets, &tset->src_csets);
@@ -2997,11 +3001,11 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
cgrp->subtree_control &= ~disable;

ret = cgroup_apply_control(cgrp);

cgroup_finalize_control(cgrp, ret);
if (ret)
goto out_unlock;

kernfs_activate(cgrp->kn);
ret = 0;
out_unlock:
cgroup_kn_unlock(of->kn);
return ret ?: nbytes;
@@ -4669,6 +4673,10 @@ int __init cgroup_init(void)

if (ss->bind)
ss->bind(init_css_set.subsys[ssid]);

mutex_lock(&cgroup_mutex);
css_populate_dir(init_css_set.subsys[ssid]);
mutex_unlock(&cgroup_mutex);
}

/* init_css_set.subsys[] has been updated, re-hash */

@@ -1452,6 +1452,13 @@ static enum event_type_t get_event_type(struct perf_event *event)

lockdep_assert_held(&ctx->lock);

/*
* It's 'group type', really, because if our group leader is
* pinned, so are we.
*/
if (event->group_leader != event)
event = event->group_leader;

event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE;
if (!ctx->task)
event_type |= EVENT_CPU;
@@ -4378,7 +4385,9 @@ EXPORT_SYMBOL_GPL(perf_event_read_value);
static int __perf_read_group_add(struct perf_event *leader,
u64 read_format, u64 *values)
{
struct perf_event_context *ctx = leader->ctx;
struct perf_event *sub;
unsigned long flags;
int n = 1; /* skip @nr */
int ret;

@@ -4408,12 +4417,15 @@ static int __perf_read_group_add(struct perf_event *leader,
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(leader);

raw_spin_lock_irqsave(&ctx->lock, flags);

list_for_each_entry(sub, &leader->sibling_list, group_entry) {
values[n++] += perf_event_count(sub);
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(sub);
}

raw_spin_unlock_irqrestore(&ctx->lock, flags);
return 0;
}

@@ -7321,21 +7333,6 @@ int perf_event_account_interrupt(struct perf_event *event)
return __perf_event_account_interrupt(event, 1);
}

static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
{
/*
* Due to interrupt latency (AKA "skid"), we may enter the
* kernel before taking an overflow, even if the PMU is only
* counting user events.
* To avoid leaking information to userspace, we must always
* reject kernel samples when exclude_kernel is set.
*/
if (event->attr.exclude_kernel && !user_mode(regs))
return false;

return true;
}

/*
* Generic event overflow handling, sampling.
*/
@@ -7356,12 +7353,6 @@ static int __perf_event_overflow(struct perf_event *event,

ret = __perf_event_account_interrupt(event, throttle);

/*
* For security, drop the skid kernel samples if necessary.
*/
if (!sample_is_allowed(event, regs))
return ret;

/*
* XXX event_limit might not quite work as expected on inherited
* events

@@ -170,21 +170,11 @@ static void irq_state_clr_disabled(struct irq_desc *desc)
irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_clr_started(struct irq_desc *desc)
{
irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);

@@ -95,8 +95,13 @@ static bool migrate_one_irq(struct irq_desc *desc)
affinity = cpu_online_mask;
brokeaff = true;
}

err = irq_do_set_affinity(d, affinity, true);
/*
* Do not set the force argument of irq_do_set_affinity() as this
* disables the masking of offline CPUs from the supplied affinity
* mask and therefore might keep/reassign the irq to the outgoing
* CPU.
*/
err = irq_do_set_affinity(d, affinity, false);
if (err) {
pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
d->irq, err);

@@ -227,6 +227,16 @@ static inline bool irqd_has_set(struct irq_data *d, unsigned int mask)
return __irqd_to_state(d) & mask;
}

static inline void irq_state_set_disabled(struct irq_desc *desc)
{
irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static inline void irq_state_set_masked(struct irq_desc *desc)
{
irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

#undef __irqd_to_state

static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)

@@ -149,6 +149,8 @@ static void resume_irq(struct irq_desc *desc)

/* Pretend that it got disabled ! */
desc->depth++;
irq_state_set_disabled(desc);
irq_state_set_masked(desc);
resume:
desc->istate &= ~IRQS_SUSPENDED;
__enable_irq(desc);

@@ -963,7 +963,6 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
return -EDEADLK;

raw_spin_lock(&task->pi_lock);
rt_mutex_adjust_prio(task);
waiter->task = task;
waiter->lock = lock;
waiter->prio = task->prio;

@@ -2069,7 +2069,7 @@ out:
/**
* try_to_wake_up_local - try to wake up a local task with rq lock held
* @p: the thread to be awakened
* @cookie: context's cookie for pinning
* @rf: request-queue flags for pinning
*
* Put @p on the run-queue if it's not already there. The caller must
* ensure that this_rq() is locked, @p is bound to this_rq() and not

@@ -683,7 +683,7 @@ static u64 vtime_delta(struct vtime *vtime)
{
unsigned long long clock;

clock = sched_clock_cpu(smp_processor_id());
clock = sched_clock();
if (clock < vtime->starttime)
return 0;

@@ -814,7 +814,7 @@ void arch_vtime_task_switch(struct task_struct *prev)

write_seqcount_begin(&vtime->seqcount);
vtime->state = VTIME_SYS;
vtime->starttime = sched_clock_cpu(smp_processor_id());
vtime->starttime = sched_clock();
write_seqcount_end(&vtime->seqcount);
}

@@ -826,7 +826,7 @@ void vtime_init_idle(struct task_struct *t, int cpu)
local_irq_save(flags);
write_seqcount_begin(&vtime->seqcount);
vtime->state = VTIME_SYS;
vtime->starttime = sched_clock_cpu(cpu);
vtime->starttime = sched_clock();
write_seqcount_end(&vtime->seqcount);
local_irq_restore(flags);
}

@@ -1392,17 +1392,19 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
struct sched_dl_entity *pi_se = &p->dl;

/*
* Use the scheduling parameters of the top pi-waiter
* task if we have one and its (absolute) deadline is
* smaller than our one... OTW we keep our runtime and
* deadline.
* Use the scheduling parameters of the top pi-waiter task if:
* - we have a top pi-waiter which is a SCHED_DEADLINE task AND
* - our dl_boosted is set (i.e. the pi-waiter's (absolute) deadline is
* smaller than our deadline OR we are a !SCHED_DEADLINE task getting
* boosted due to a SCHED_DEADLINE pi-waiter).
* Otherwise we keep our runtime and deadline.
*/
if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
if (pi_task && dl_prio(pi_task->normal_prio) && p->dl.dl_boosted) {
pi_se = &pi_task->dl;
} else if (!dl_prio(p->normal_prio)) {
/*
* Special case in which we have a !SCHED_DEADLINE task
* that is going to be deboosted, but exceedes its
* that is going to be deboosted, but exceeds its
* runtime while doing so. No point in replenishing
* it, as it's going to return back to its original
* scheduling class after this.

@@ -113,7 +113,7 @@ static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;

@@ -169,8 +169,11 @@ int ftrace_nr_registered_ops(void)

mutex_lock(&ftrace_lock);

for (ops = ftrace_ops_list;
ops != &ftrace_list_end; ops = ops->next)
for (ops = rcu_dereference_protected(ftrace_ops_list,
lockdep_is_held(&ftrace_lock));
ops != &ftrace_list_end;
ops = rcu_dereference_protected(ops->next,
lockdep_is_held(&ftrace_lock)))
cnt++;

mutex_unlock(&ftrace_lock);
@@ -275,10 +278,11 @@ static void update_ftrace_function(void)
* If there's only one ftrace_ops registered, the ftrace_ops_list
* will point to the ops we want.
*/
set_function_trace_op = ftrace_ops_list;
set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
lockdep_is_held(&ftrace_lock));

/* If there's no ftrace_ops registered, just call the stub function */
if (ftrace_ops_list == &ftrace_list_end) {
if (set_function_trace_op == &ftrace_list_end) {
func = ftrace_stub;

/*
@@ -286,7 +290,8 @@ static void update_ftrace_function(void)
* recursion safe and not dynamic and the arch supports passing ops,
* then have the mcount trampoline call the function directly.
*/
} else if (ftrace_ops_list->next == &ftrace_list_end) {
} else if (rcu_dereference_protected(ftrace_ops_list->next,
lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
func = ftrace_ops_get_list_func(ftrace_ops_list);

} else {
@@ -348,9 +353,11 @@ int using_ftrace_ops_list_func(void)
return ftrace_trace_function == ftrace_ops_list_func;
}

static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
static void add_ftrace_ops(struct ftrace_ops __rcu **list,
struct ftrace_ops *ops)
{
ops->next = *list;
rcu_assign_pointer(ops->next, *list);

/*
* We are entering ops into the list but another
* CPU might be walking that list. We need to make sure
@@ -360,7 +367,8 @@ static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
struct ftrace_ops *ops)
{
struct ftrace_ops **p;

@@ -368,7 +376,10 @@ static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
* If we are removing the last function, then simply point
* to the ftrace_stub.
*/
if (*list == ops && ops->next == &ftrace_list_end) {
if (rcu_dereference_protected(*list,
lockdep_is_held(&ftrace_lock)) == ops &&
rcu_dereference_protected(ops->next,
lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
*list = &ftrace_list_end;
return 0;
}
@@ -1569,8 +1580,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
return 0;
#endif

hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);

if (hash_contains_ip(ip, &hash))
ret = 1;
@@ -2840,7 +2851,8 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
* If there's no more ops registered with ftrace, run a
* sanity check to make sure all rec flags are cleared.
*/
if (ftrace_ops_list == &ftrace_list_end) {
if (rcu_dereference_protected(ftrace_ops_list,
lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
struct ftrace_page *pg;
struct dyn_ftrace *rec;

@@ -6453,7 +6465,8 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
if (ftrace_enabled) {

/* we are starting ftrace again */
if (ftrace_ops_list != &ftrace_list_end)
if (rcu_dereference_protected(ftrace_ops_list,
lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
update_ftrace_function();

ftrace_startup_sysctl();

@@ -1136,12 +1136,12 @@ static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
for (i = 0; i < nr_pages; i++) {
struct page *page;
/*
* __GFP_NORETRY flag makes sure that the allocation fails
* gracefully without invoking oom-killer and the system is
* not destabilized.
* __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
* gracefully without invoking oom-killer and the system is not
* destabilized.
*/
bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
GFP_KERNEL | __GFP_NORETRY,
GFP_KERNEL | __GFP_RETRY_MAYFAIL,
cpu_to_node(cpu));
if (!bpage)
goto free_pages;
@@ -1149,7 +1149,7 @@ static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
list_add(&bpage->list, pages);

page = alloc_pages_node(cpu_to_node(cpu),
GFP_KERNEL | __GFP_NORETRY, 0);
GFP_KERNEL | __GFP_RETRY_MAYFAIL, 0);
if (!page)
goto free_pages;
bpage->page = page_address(page);

@@ -7774,6 +7774,7 @@ static int instance_rmdir(const char *name)
}
kfree(tr->topts);

free_cpumask_var(tr->tracing_cpumask);
kfree(tr->name);
kfree(tr);

@@ -1210,9 +1210,9 @@ struct ftrace_event_field {
struct event_filter {
int n_preds; /* Number assigned */
int a_preds; /* allocated */
struct filter_pred *preds;
struct filter_pred *root;
char *filter_string;
struct filter_pred __rcu *preds;
struct filter_pred __rcu *root;
char *filter_string;
};

struct event_subsystem {

@@ -3577,6 +3577,13 @@ static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,

/* yeap, return possible CPUs in @node that @attrs wants */
cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);

if (cpumask_empty(cpumask)) {
pr_warn_once("WARNING: workqueue cpumask: online intersect > "
"possible intersect\n");
return false;
}

return !cpumask_equal(cpumask, attrs->cpumask);

use_dfl:
@@ -3744,8 +3751,12 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
return -EINVAL;

/* creating multiple pwqs breaks ordering guarantee */
if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
return -EINVAL;
if (!list_empty(&wq->pwqs)) {
if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
return -EINVAL;

wq->flags &= ~__WQ_ORDERED;
}

ctx = apply_wqattrs_prepare(wq, attrs);
if (!ctx)
@@ -3929,6 +3940,16 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
struct workqueue_struct *wq;
struct pool_workqueue *pwq;

/*
* Unbound && max_active == 1 used to imply ordered, which is no
* longer the case on NUMA machines due to per-node pools. While
* alloc_ordered_workqueue() is the right way to create an ordered
* workqueue, keep the previous behavior to avoid subtle breakages
* on NUMA.
*/
if ((flags & WQ_UNBOUND) && max_active == 1)
flags |= __WQ_ORDERED;

/* see the comment above the definition of WQ_POWER_EFFICIENT */
if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
flags |= WQ_UNBOUND;
@@ -4119,13 +4140,14 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
struct pool_workqueue *pwq;

/* disallow meddling with max_active for ordered workqueues */
if (WARN_ON(wq->flags & __WQ_ORDERED))
if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
return;

max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);

mutex_lock(&wq->mutex);

wq->flags &= ~__WQ_ORDERED;
wq->saved_max_active = max_active;

for_each_pwq(pwq, wq)
@@ -5253,7 +5275,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
* attributes breaks ordering guarantee. Disallow exposing ordered
* workqueues.
*/
if (WARN_ON(wq->flags & __WQ_ORDERED))
if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
return -EINVAL;

wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);