Merge branch 'perf/urgent' into perf/core
Merge the latest fixes.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -916,7 +916,7 @@ static int audit_tree_handle_event(struct fsnotify_group *group,
                                    struct fsnotify_mark *inode_mark,
                                    struct fsnotify_mark *vfsmount_mark,
                                    u32 mask, void *data, int data_type,
-                                   const unsigned char *file_name)
+                                   const unsigned char *file_name, u32 cookie)
 {
         return 0;
 }
@@ -471,7 +471,7 @@ static int audit_watch_handle_event(struct fsnotify_group *group,
                                     struct fsnotify_mark *inode_mark,
                                     struct fsnotify_mark *vfsmount_mark,
                                     u32 mask, void *data, int data_type,
-                                    const unsigned char *dname)
+                                    const unsigned char *dname, u32 cookie)
 {
         struct inode *inode;
         struct audit_parent *parent;
@@ -974,12 +974,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
  *    Temporarilly set tasks mems_allowed to target nodes of migration,
  *    so that the migration code can allocate pages on these nodes.
  *
- *    Call holding cpuset_mutex, so current's cpuset won't change
- *    during this call, as manage_mutex holds off any cpuset_attach()
- *    calls. Therefore we don't need to take task_lock around the
- *    call to guarantee_online_mems(), as we know no one is changing
- *    our task's cpuset.
- *
  *    While the mm_struct we are migrating is typically from some
  *    other task, the task_struct mems_allowed that we are hacking
  *    is for our current task, which must allocate new pages for that
@@ -996,8 +990,10 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 
         do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
 
+        rcu_read_lock();
         mems_cs = effective_nodemask_cpuset(task_cs(tsk));
         guarantee_online_mems(mems_cs, &tsk->mems_allowed);
+        rcu_read_unlock();
 }
 
 /*
@@ -2486,9 +2482,9 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
 
         task_lock(current);
         cs = nearest_hardwall_ancestor(task_cs(current));
+        allowed = node_isset(node, cs->mems_allowed);
         task_unlock(current);
 
-        allowed = node_isset(node, cs->mems_allowed);
         mutex_unlock(&callback_mutex);
         return allowed;
 }
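The hunk above moves the node_isset() read inside the task_lock() section, so mems_allowed is sampled while the cpuset resolved via task_cs(current) is still guaranteed to be alive and stable. As a rough user-space analogue (not kernel code; the names below are invented for illustration), the same rule is: copy what you need out of a lock-protected object before dropping the lock that keeps it valid.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct cfg { int allowed; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct cfg *current_cfg;          /* protected by 'lock' */

/* Safe pattern: dereference while the lock still pins *current_cfg.
 * Reading the pointer under the lock but ->allowed after unlocking
 * would race with replace_cfg() freeing the old object. */
static int read_allowed(void)
{
        int allowed;

        pthread_mutex_lock(&lock);
        allowed = current_cfg->allowed;
        pthread_mutex_unlock(&lock);
        return allowed;
}

static void replace_cfg(int allowed)
{
        struct cfg *next = malloc(sizeof(*next));
        struct cfg *old;

        next->allowed = allowed;
        pthread_mutex_lock(&lock);
        old = current_cfg;
        current_cfg = next;
        pthread_mutex_unlock(&lock);
        free(old);                       /* old object dies once unpublished */
}

int main(void)
{
        current_cfg = malloc(sizeof(*current_cfg));
        current_cfg->allowed = 1;
        printf("allowed=%d\n", read_allowed());
        replace_cfg(0);
        printf("allowed=%d\n", read_allowed());
        free(current_cfg);
        return 0;
}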
@@ -10,6 +10,7 @@
 #include <linux/mutex.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/topology.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
@@ -802,8 +802,7 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
 
 static void wake_threads_waitq(struct irq_desc *desc)
 {
-        if (atomic_dec_and_test(&desc->threads_active) &&
-            waitqueue_active(&desc->wait_for_threads))
+        if (atomic_dec_and_test(&desc->threads_active))
                 wake_up(&desc->wait_for_threads);
 }
 
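The change above drops the unlocked waitqueue_active() test and always calls wake_up(): peeking at the waitqueue without synchronization can race with a task that is in the middle of going to sleep on it, and the skipped wake_up() then leaves that task sleeping. A small pthread sketch of the same idea (illustrative only, names invented here): the waker updates the counter and signals under the mutex instead of skipping the signal based on an unsynchronized "anyone waiting?" peek.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  waitq = PTHREAD_COND_INITIALIZER;
static int threads_active = 2;           /* outstanding workers */

/* Called by each worker when it finishes.  The broadcast is issued
 * unconditionally while holding the mutex; deciding to skip it based
 * on an unlocked "is anyone waiting?" check is the kind of race the
 * hunk above removes. */
static void *worker(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        if (--threads_active == 0)
                pthread_cond_broadcast(&waitq);
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        pthread_create(&t1, NULL, worker, NULL);
        pthread_create(&t2, NULL, worker, NULL);

        pthread_mutex_lock(&lock);
        while (threads_active > 0)       /* wait for both workers */
                pthread_cond_wait(&waitq, &lock);
        pthread_mutex_unlock(&lock);

        printf("all workers done\n");
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
}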
@@ -549,14 +549,14 @@ static int create_hash_tables(void)
                 struct page *page;
 
                 page = alloc_pages_exact_node(node,
-                                GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+                                GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
                                 0);
                 if (!page)
                         goto out_cleanup;
                 per_cpu(cpu_profile_hits, cpu)[1]
                         = (struct profile_hit *)page_address(page);
                 page = alloc_pages_exact_node(node,
-                                GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+                                GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
                                 0);
                 if (!page)
                         goto out_cleanup;
@@ -70,7 +70,7 @@ static void cpudl_heapify(struct cpudl *cp, int idx)
 
 static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl)
 {
-        WARN_ON(!cpu_present(idx) || idx == IDX_INVALID);
+        WARN_ON(idx == IDX_INVALID || !cpu_present(idx));
 
         if (dl_time_before(new_dl, cp->elements[idx].dl)) {
                 cp->elements[idx].dl = new_dl;
@@ -117,7 +117,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
         }
 
 out:
-        WARN_ON(!cpu_present(best_cpu) && best_cpu != -1);
+        WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));
 
         return best_cpu;
 }
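Both WARN_ON() changes above are pure argument reorderings that lean on C's short-circuit evaluation: once idx == IDX_INVALID (or best_cpu == -1) is true, the cpu_present() test on the right-hand side is never evaluated, so the sentinel value is never used as a CPU index. A stand-alone sketch of the effect, with cpu_present() replaced by a counting stub and the values invented here:

#include <stdio.h>
#include <stdbool.h>

#define IDX_INVALID -1                   /* stand-in for the scheduler's sentinel */

static int present_calls;

/* Counting stub standing in for cpu_present(). */
static bool stub_cpu_present(int cpu)
{
        present_calls++;
        return cpu >= 0 && cpu < 4;
}

int main(void)
{
        int idx = IDX_INVALID;

        /* Old order: the cpu test runs even for the sentinel value. */
        present_calls = 0;
        if (!stub_cpu_present(idx) || idx == IDX_INVALID)
                ;
        printf("old order: cpu_present() evaluated %d time(s)\n", present_calls);

        /* New order: the sentinel check short-circuits the cpu test. */
        present_calls = 0;
        if (idx == IDX_INVALID || !stub_cpu_present(idx))
                ;
        printf("new order: cpu_present() evaluated %d time(s)\n", present_calls);
        return 0;
}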
@@ -135,7 +135,6 @@ static void update_dl_migration(struct dl_rq *dl_rq)
 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
         struct task_struct *p = dl_task_of(dl_se);
-        dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
         if (p->nr_cpus_allowed > 1)
                 dl_rq->dl_nr_migratory++;
@@ -146,7 +145,6 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
         struct task_struct *p = dl_task_of(dl_se);
-        dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
         if (p->nr_cpus_allowed > 1)
                 dl_rq->dl_nr_migratory--;
@@ -564,6 +562,8 @@ int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
         return 1;
 }
 
+extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
+
 /*
  * Update the current task's runtime statistics (provided it is still
  * a -deadline task and has not been removed from the dl_rq).
@@ -627,11 +627,13 @@ static void update_curr_dl(struct rq *rq)
                 struct rt_rq *rt_rq = &rq->rt;
 
                 raw_spin_lock(&rt_rq->rt_runtime_lock);
-                rt_rq->rt_time += delta_exec;
                 /*
                  * We'll let actual RT tasks worry about the overflow here, we
-                 * have our own CBS to keep us inline -- see above.
+                 * have our own CBS to keep us inline; only account when RT
+                 * bandwidth is relevant.
                  */
+                if (sched_rt_bandwidth_account(rt_rq))
+                        rt_rq->rt_time += delta_exec;
                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
         }
 }
@@ -7001,15 +7001,15 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
         struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
         /*
-         * Ensure the task's vruntime is normalized, so that when its
+         * Ensure the task's vruntime is normalized, so that when it's
          * switched back to the fair class the enqueue_entity(.flags=0) will
          * do the right thing.
          *
-         * If it was on_rq, then the dequeue_entity(.flags=0) will already
-         * have normalized the vruntime, if it was !on_rq, then only when
+         * If it's on_rq, then the dequeue_entity(.flags=0) will already
+         * have normalized the vruntime, if it's !on_rq, then only when
          * the task is sleeping will it still have non-normalized vruntime.
          */
-        if (!se->on_rq && p->state != TASK_RUNNING) {
+        if (!p->on_rq && p->state != TASK_RUNNING) {
                 /*
                  * Fix up our vruntime so that the current sleep doesn't
                  * cause 'unlimited' sleep bonus.
@@ -538,6 +538,14 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
+{
+        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+
+        return (hrtimer_active(&rt_b->rt_period_timer) ||
+                rt_rq->rt_time < rt_b->rt_runtime);
+}
+
 #ifdef CONFIG_SMP
 /*
  * We ran out of runtime, see if we can borrow some from our neighbours.
@@ -1777,6 +1777,16 @@ static void trace_module_add_events(struct module *mod)
 {
         struct ftrace_event_call **call, **start, **end;
 
+        if (!mod->num_trace_events)
+                return;
+
+        /* Don't add infrastructure for mods without tracepoints */
+        if (trace_module_has_bad_taint(mod)) {
+                pr_err("%s: module has bad taint, not creating trace events\n",
+                       mod->name);
+                return;
+        }
+
         start = mod->trace_events;
         end = mod->trace_events + mod->num_trace_events;
 
@@ -631,6 +631,11 @@ void tracepoint_iter_reset(struct tracepoint_iter *iter)
 EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
 
 #ifdef CONFIG_MODULES
+bool trace_module_has_bad_taint(struct module *mod)
+{
+        return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP));
+}
+
 static int tracepoint_module_coming(struct module *mod)
 {
         struct tp_module *tp_mod, *iter;
@@ -641,7 +646,7 @@ static int tracepoint_module_coming(struct module *mod)
          * module headers (for forced load), to make sure we don't cause a crash.
          * Staging and out-of-tree GPL modules are fine.
          */
-        if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)))
+        if (trace_module_has_bad_taint(mod))
                 return 0;
         mutex_lock(&tracepoints_mutex);
         tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
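trace_module_has_bad_taint() simply names the bit test that the open-coded condition above used to perform: mask out the two tolerated taint flags (TAINT_OOT_MODULE, TAINT_CRAP) and report whether anything else remains set. A stand-alone sketch of that mask logic, with illustrative bit positions (the real values live in the kernel headers):

#include <stdio.h>

/* Illustrative bit positions only; the kernel defines the real ones. */
#define TAINT_FORCED_MODULE   1
#define TAINT_CRAP           10
#define TAINT_OOT_MODULE     12

/* Same shape as trace_module_has_bad_taint(): nonzero iff any taint bit
 * other than OOT_MODULE or CRAP is set. */
static unsigned long bad_taint(unsigned long taints)
{
        return taints & ~((1UL << TAINT_OOT_MODULE) | (1UL << TAINT_CRAP));
}

int main(void)
{
        unsigned long tolerated = (1UL << TAINT_OOT_MODULE) | (1UL << TAINT_CRAP);
        unsigned long forced    = tolerated | (1UL << TAINT_FORCED_MODULE);

        printf("oot+crap only -> bad? %d\n", bad_taint(tolerated) != 0);  /* 0 */
        printf("plus forced   -> bad? %d\n", bad_taint(forced) != 0);     /* 1 */
        return 0;
}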