Merge branch 'linus' into sched/core
Merge reason: we want to queue up a dependent cleanup.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
@@ -914,6 +914,15 @@ NORET_TYPE void do_exit(long code)
     if (unlikely(!tsk->pid))
         panic("Attempted to kill the idle task!");
 
+    /*
+     * If do_exit is called because this processes oopsed, it's possible
+     * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
+     * continuing. Amongst other possible reasons, this is to prevent
+     * mm_release()->clear_child_tid() from writing to a user-controlled
+     * kernel address.
+     */
+    set_fs(USER_DS);
+
     tracehook_report_exit(&code);
 
     validate_creds_for_do_exit(tsk);
@@ -620,7 +620,7 @@ static struct pmu perf_breakpoint = {
     .read = hw_breakpoint_pmu_read,
 };
 
-static int __init init_hw_breakpoint(void)
+int __init init_hw_breakpoint(void)
 {
     unsigned int **task_bp_pinned;
     int cpu, err_cpu;
@@ -655,6 +655,5 @@ static int __init init_hw_breakpoint(void)
 
     return -ENOMEM;
 }
-core_initcall(init_hw_breakpoint);
 
 
@@ -214,7 +214,7 @@ static int irq_spurious_proc_show(struct seq_file *m, void *v)
 
 static int irq_spurious_proc_open(struct inode *inode, struct file *file)
 {
-    return single_open(file, irq_spurious_proc_show, NULL);
+    return single_open(file, irq_spurious_proc_show, PDE(inode)->data);
 }
 
 static const struct file_operations irq_spurious_proc_fops = {
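
Editor's note on the single_open() change above (not part of the commit): the third argument to single_open() is stored in seq_file->private, which is how a show callback reaches the per-entry data kept in the proc_dir_entry. A minimal sketch against the 2.6.x-era proc/seq_file API follows; the module, file and variable names ("demo_*", "demo_value") are invented, and newer kernels would use pde_data()/struct proc_ops instead of PDE()/file_operations.

/* Illustrative only: thread a private pointer through single_open(). */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_value = 42;

static int demo_show(struct seq_file *m, void *v)
{
    int *data = m->private;  /* whatever was passed to single_open() */

    seq_printf(m, "%d\n", *data);
    return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
    /* Pass the per-entry data, as the fix does with PDE(inode)->data. */
    return single_open(file, demo_show, PDE(inode)->data);
}

static const struct file_operations demo_fops = {
    .owner   = THIS_MODULE,
    .open    = demo_open,
    .read    = seq_read,
    .llseek  = seq_lseek,
    .release = single_release,
};

static int __init demo_init(void)
{
    /* proc_create_data() stores &demo_value in PDE(inode)->data. */
    proc_create_data("demo", 0444, NULL, &demo_fops, &demo_value);
    return 0;
}

static void __exit demo_exit(void)
{
    remove_proc_entry("demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
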
@@ -145,7 +145,9 @@ void irq_work_run(void)
         * Clear the BUSY bit and return to the free state if
         * no-one else claimed it meanwhile.
         */
-        cmpxchg(&entry->next, next_flags(NULL, IRQ_WORK_BUSY), NULL);
+        (void)cmpxchg(&entry->next,
+                      next_flags(NULL, IRQ_WORK_BUSY),
+                      NULL);
     }
 }
 EXPORT_SYMBOL_GPL(irq_work_run);
@@ -2326,6 +2326,18 @@ static void find_module_sections(struct module *mod, struct load_info *info)
     kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) *
                mod->num_trace_events, GFP_KERNEL);
 #endif
+#ifdef CONFIG_TRACING
+    mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
+                     sizeof(*mod->trace_bprintk_fmt_start),
+                     &mod->num_trace_bprintk_fmt);
+    /*
+     * This section contains pointers to allocated objects in the trace
+     * code and not scanning it leads to false positives.
+     */
+    kmemleak_scan_area(mod->trace_bprintk_fmt_start,
+               sizeof(*mod->trace_bprintk_fmt_start) *
+               mod->num_trace_bprintk_fmt, GFP_KERNEL);
+#endif
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
     /* sechdrs[0].sh_size is always zero */
     mod->ftrace_callsites = section_objs(info, "__mcount_loc",
@@ -31,6 +31,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/perf_event.h>
 #include <linux/ftrace_event.h>
+#include <linux/hw_breakpoint.h>
 
 #include <asm/irq_regs.h>
 
@@ -1286,8 +1287,6 @@ void __perf_event_task_sched_out(struct task_struct *task,
 {
     int ctxn;
 
-    perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
-
     for_each_task_context_nr(ctxn)
         perf_event_context_sched_out(task, ctxn, next);
 }
@@ -1621,8 +1620,12 @@ static void rotate_ctx(struct perf_event_context *ctx)
 {
     raw_spin_lock(&ctx->lock);
 
-    /* Rotate the first entry last of non-pinned groups */
-    list_rotate_left(&ctx->flexible_groups);
+    /*
+     * Rotate the first entry last of non-pinned groups. Rotation might be
+     * disabled by the inheritance code.
+     */
+    if (!ctx->rotate_disable)
+        list_rotate_left(&ctx->flexible_groups);
 
     raw_spin_unlock(&ctx->lock);
 }
@@ -2234,11 +2237,6 @@ int perf_event_release_kernel(struct perf_event *event)
     raw_spin_unlock_irq(&ctx->lock);
     mutex_unlock(&ctx->mutex);
 
-    mutex_lock(&event->owner->perf_event_mutex);
-    list_del_init(&event->owner_entry);
-    mutex_unlock(&event->owner->perf_event_mutex);
-    put_task_struct(event->owner);
-
     free_event(event);
 
     return 0;
@@ -2251,9 +2249,43 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
 static int perf_release(struct inode *inode, struct file *file)
 {
     struct perf_event *event = file->private_data;
+    struct task_struct *owner;
 
     file->private_data = NULL;
 
+    rcu_read_lock();
+    owner = ACCESS_ONCE(event->owner);
+    /*
+     * Matches the smp_wmb() in perf_event_exit_task(). If we observe
+     * !owner it means the list deletion is complete and we can indeed
+     * free this event, otherwise we need to serialize on
+     * owner->perf_event_mutex.
+     */
+    smp_read_barrier_depends();
+    if (owner) {
+        /*
+         * Since delayed_put_task_struct() also drops the last
+         * task reference we can safely take a new reference
+         * while holding the rcu_read_lock().
+         */
+        get_task_struct(owner);
+    }
+    rcu_read_unlock();
+
+    if (owner) {
+        mutex_lock(&owner->perf_event_mutex);
+        /*
+         * We have to re-check the event->owner field, if it is cleared
+         * we raced with perf_event_exit_task(), acquiring the mutex
+         * ensured they're done, and we can proceed with freeing the
+         * event.
+         */
+        if (event->owner)
+            list_del_init(&event->owner_entry);
+        mutex_unlock(&owner->perf_event_mutex);
+        put_task_struct(owner);
+    }
+
     return perf_event_release_kernel(event);
 }
 
@@ -5677,7 +5709,7 @@ SYSCALL_DEFINE5(perf_event_open,
     mutex_unlock(&ctx->mutex);
 
     event->owner = current;
-    get_task_struct(current);
+
     mutex_lock(&current->perf_event_mutex);
     list_add_tail(&event->owner_entry, &current->perf_event_list);
     mutex_unlock(&current->perf_event_mutex);
@@ -5745,12 +5777,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
     ++ctx->generation;
     mutex_unlock(&ctx->mutex);
 
-    event->owner = current;
-    get_task_struct(current);
-    mutex_lock(&current->perf_event_mutex);
-    list_add_tail(&event->owner_entry, &current->perf_event_list);
-    mutex_unlock(&current->perf_event_mutex);
-
     return event;
 
 err_free:
@@ -5901,8 +5927,24 @@ again:
  */
 void perf_event_exit_task(struct task_struct *child)
 {
+    struct perf_event *event, *tmp;
     int ctxn;
 
+    mutex_lock(&child->perf_event_mutex);
+    list_for_each_entry_safe(event, tmp, &child->perf_event_list,
+                 owner_entry) {
+        list_del_init(&event->owner_entry);
+
+        /*
+         * Ensure the list deletion is visible before we clear
+         * the owner, closes a race against perf_release() where
+         * we need to serialize on the owner->perf_event_mutex.
+         */
+        smp_wmb();
+        event->owner = NULL;
+    }
+    mutex_unlock(&child->perf_event_mutex);
+
     for_each_task_context_nr(ctxn)
         perf_event_exit_task_context(child, ctxn);
 }
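
Editor's sketch (not part of the diff): the perf hunks above split the owner hand-off across two functions, so here are both halves condensed into one place to make the barrier pairing easier to read. "struct record", record_exit_task() and record_release() are invented names; the task_struct fields, barriers and locking are the ones quoted from the diff.

#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/compiler.h>

struct record {
    struct task_struct *owner;
    struct list_head   owner_entry;
};

/* Exit side: unlink under the owner's mutex, then publish "no owner". */
static void record_exit_task(struct task_struct *tsk, struct record *rec)
{
    mutex_lock(&tsk->perf_event_mutex);
    list_del_init(&rec->owner_entry);
    smp_wmb();              /* list deletion visible before owner is cleared */
    rec->owner = NULL;
    mutex_unlock(&tsk->perf_event_mutex);
}

/* Release side: pin the owner under RCU, then re-check under its mutex. */
static void record_release(struct record *rec)
{
    struct task_struct *owner;

    rcu_read_lock();
    owner = ACCESS_ONCE(rec->owner);
    smp_read_barrier_depends();     /* pairs with the smp_wmb() above */
    if (owner)
        get_task_struct(owner);     /* safe while rcu_read_lock() is held */
    rcu_read_unlock();

    if (owner) {
        mutex_lock(&owner->perf_event_mutex);
        if (rec->owner)             /* did we race with the exit side? */
            list_del_init(&rec->owner_entry);
        mutex_unlock(&owner->perf_event_mutex);
        put_task_struct(owner);
    }
}
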
@@ -6122,6 +6164,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
     struct perf_event *event;
     struct task_struct *parent = current;
     int inherited_all = 1;
+    unsigned long flags;
     int ret = 0;
 
     child->perf_event_ctxp[ctxn] = NULL;
@@ -6162,6 +6205,15 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
             break;
     }
 
+    /*
+     * We can't hold ctx->lock when iterating the ->flexible_group list due
+     * to allocations, but we need to prevent rotation because
+     * rotate_ctx() will change the list from interrupt context.
+     */
+    raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+    parent_ctx->rotate_disable = 1;
+    raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+
     list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
         ret = inherit_task_group(event, parent, parent_ctx,
                      child, ctxn, &inherited_all);
@@ -6169,6 +6221,10 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
             break;
     }
 
+    raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+    parent_ctx->rotate_disable = 0;
+    raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+
     child_ctx = child->perf_event_ctxp[ctxn];
 
     if (child_ctx && inherited_all) {
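
Editor's sketch (not part of the diff): the rotate_ctx() and inheritance hunks above implement a "park the rotator while walking the list unlocked" pattern. Below it is reduced to a toy context; "struct toy_ctx", toy_rotate() and toy_walk() are invented names, while the flag and the lock/rotate calls mirror the diff.

#include <linux/list.h>
#include <linux/spinlock.h>

struct toy_ctx {
    raw_spinlock_t   lock;
    struct list_head flexible_groups;
    int              rotate_disable;
};

/* Timer/interrupt side: only rotate when the walker has not disabled it. */
static void toy_rotate(struct toy_ctx *ctx)
{
    raw_spin_lock(&ctx->lock);
    if (!ctx->rotate_disable)
        list_rotate_left(&ctx->flexible_groups);
    raw_spin_unlock(&ctx->lock);
}

/* Walker side: may sleep per entry, so it cannot hold ctx->lock throughout. */
static void toy_walk(struct toy_ctx *ctx)
{
    struct list_head *pos;
    unsigned long flags;

    raw_spin_lock_irqsave(&ctx->lock, flags);
    ctx->rotate_disable = 1;
    raw_spin_unlock_irqrestore(&ctx->lock, flags);

    list_for_each(pos, &ctx->flexible_groups)
        ;   /* ...allocate/copy per entry, possibly sleeping... */

    raw_spin_lock_irqsave(&ctx->lock, flags);
    ctx->rotate_disable = 0;
    raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
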
@@ -6321,6 +6377,8 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 
 void __init perf_event_init(void)
 {
+    int ret;
+
     perf_event_init_all_cpus();
     init_srcu_struct(&pmus_srcu);
     perf_pmu_register(&perf_swevent);
@@ -6328,4 +6386,7 @@ void __init perf_event_init(void)
     perf_pmu_register(&perf_task_clock);
     perf_tp_register();
     perf_cpu_notifier(perf_cpu_notify);
+
+    ret = init_hw_breakpoint();
+    WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
 }
@@ -37,13 +37,13 @@ static int check_clock(const clockid_t which_clock)
     if (pid == 0)
         return 0;
 
-    read_lock(&tasklist_lock);
+    rcu_read_lock();
     p = find_task_by_vpid(pid);
     if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
-           same_thread_group(p, current) : thread_group_leader(p))) {
+           same_thread_group(p, current) : has_group_leader_pid(p))) {
         error = -EINVAL;
     }
-    read_unlock(&tasklist_lock);
+    rcu_read_unlock();
 
     return error;
 }
@@ -390,7 +390,7 @@ int posix_cpu_timer_create(struct k_itimer *new_timer)
 
     INIT_LIST_HEAD(&new_timer->it.cpu.entry);
 
-    read_lock(&tasklist_lock);
+    rcu_read_lock();
     if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
         if (pid == 0) {
             p = current;
@@ -404,7 +404,7 @@ int posix_cpu_timer_create(struct k_itimer *new_timer)
             p = current->group_leader;
         } else {
             p = find_task_by_vpid(pid);
-            if (p && !thread_group_leader(p))
+            if (p && !has_group_leader_pid(p))
                 p = NULL;
         }
     }
@@ -414,7 +414,7 @@ int posix_cpu_timer_create(struct k_itimer *new_timer)
     } else {
         ret = -EINVAL;
     }
-    read_unlock(&tasklist_lock);
+    rcu_read_unlock();
 
     return ret;
 }
@@ -327,7 +327,6 @@ static int create_image(int platform_mode)
 int hibernation_snapshot(int platform_mode)
 {
     int error;
-    gfp_t saved_mask;
 
     error = platform_begin(platform_mode);
     if (error)
@@ -339,7 +338,7 @@ int hibernation_snapshot(int platform_mode)
         goto Close;
 
     suspend_console();
-    saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
+    pm_restrict_gfp_mask();
     error = dpm_suspend_start(PMSG_FREEZE);
     if (error)
         goto Recover_platform;
@@ -348,7 +347,10 @@ int hibernation_snapshot(int platform_mode)
         goto Recover_platform;
 
     error = create_image(platform_mode);
-    /* Control returns here after successful restore */
+    /*
+     * Control returns here (1) after the image has been created or the
+     * image creation has failed and (2) after a successful restore.
+     */
 
  Resume_devices:
     /* We may need to release the preallocated image pages here. */
@@ -357,7 +359,10 @@ int hibernation_snapshot(int platform_mode)
 
     dpm_resume_end(in_suspend ?
         (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
-    set_gfp_allowed_mask(saved_mask);
+
+    if (error || !in_suspend)
+        pm_restore_gfp_mask();
+
     resume_console();
  Close:
     platform_end(platform_mode);
@@ -452,17 +457,16 @@ static int resume_target_kernel(bool platform_mode)
 int hibernation_restore(int platform_mode)
 {
     int error;
-    gfp_t saved_mask;
 
     pm_prepare_console();
     suspend_console();
-    saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
+    pm_restrict_gfp_mask();
     error = dpm_suspend_start(PMSG_QUIESCE);
     if (!error) {
         error = resume_target_kernel(platform_mode);
         dpm_resume_end(PMSG_RECOVER);
     }
-    set_gfp_allowed_mask(saved_mask);
+    pm_restore_gfp_mask();
     resume_console();
     pm_restore_console();
     return error;
@@ -476,7 +480,6 @@ int hibernation_restore(int platform_mode)
 int hibernation_platform_enter(void)
 {
     int error;
-    gfp_t saved_mask;
 
     if (!hibernation_ops)
         return -ENOSYS;
@@ -492,7 +495,6 @@ int hibernation_platform_enter(void)
 
     entering_platform_hibernation = true;
     suspend_console();
-    saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
     error = dpm_suspend_start(PMSG_HIBERNATE);
     if (error) {
         if (hibernation_ops->recover)
@@ -536,7 +538,6 @@ int hibernation_platform_enter(void)
  Resume_devices:
     entering_platform_hibernation = false;
     dpm_resume_end(PMSG_RESTORE);
-    set_gfp_allowed_mask(saved_mask);
     resume_console();
 
  Close:
@@ -646,6 +647,7 @@ int hibernate(void)
         swsusp_free();
         if (!error)
             power_down();
+        pm_restore_gfp_mask();
     } else {
         pr_debug("PM: Image restored successfully.\n");
     }
@@ -197,7 +197,6 @@ static int suspend_enter(suspend_state_t state)
 int suspend_devices_and_enter(suspend_state_t state)
 {
     int error;
-    gfp_t saved_mask;
 
     if (!suspend_ops)
         return -ENOSYS;
@@ -208,7 +207,7 @@ int suspend_devices_and_enter(suspend_state_t state)
         goto Close;
     }
     suspend_console();
-    saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
+    pm_restrict_gfp_mask();
     suspend_test_start();
     error = dpm_suspend_start(PMSG_SUSPEND);
     if (error) {
@@ -225,7 +224,7 @@ int suspend_devices_and_enter(suspend_state_t state)
     suspend_test_start();
     dpm_resume_end(PMSG_RESUME);
     suspend_test_finish("resume devices");
-    set_gfp_allowed_mask(saved_mask);
+    pm_restore_gfp_mask();
     resume_console();
  Close:
     if (suspend_ops->end)
@@ -6,6 +6,7 @@
  *
  * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
  * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
+ * Copyright (C) 2010 Bojan Smojver <bojan@rexursive.com>
  *
  * This file is released under the GPLv2.
  *
@@ -753,30 +754,43 @@ static int load_image_lzo(struct swap_map_handle *handle,
 {
     unsigned int m;
     int error = 0;
+    struct bio *bio;
     struct timeval start;
     struct timeval stop;
     unsigned nr_pages;
-    size_t off, unc_len, cmp_len;
-    unsigned char *unc, *cmp, *page;
+    size_t i, off, unc_len, cmp_len;
+    unsigned char *unc, *cmp, *page[LZO_CMP_PAGES];
 
-    page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
-    if (!page) {
-        printk(KERN_ERR "PM: Failed to allocate LZO page\n");
-        return -ENOMEM;
+    for (i = 0; i < LZO_CMP_PAGES; i++) {
+        page[i] = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
+        if (!page[i]) {
+            printk(KERN_ERR "PM: Failed to allocate LZO page\n");
+
+            while (i)
+                free_page((unsigned long)page[--i]);
+
+            return -ENOMEM;
+        }
     }
 
     unc = vmalloc(LZO_UNC_SIZE);
     if (!unc) {
         printk(KERN_ERR "PM: Failed to allocate LZO uncompressed\n");
-        free_page((unsigned long)page);
+
+        for (i = 0; i < LZO_CMP_PAGES; i++)
+            free_page((unsigned long)page[i]);
+
         return -ENOMEM;
     }
 
     cmp = vmalloc(LZO_CMP_SIZE);
     if (!cmp) {
         printk(KERN_ERR "PM: Failed to allocate LZO compressed\n");
+
         vfree(unc);
-        free_page((unsigned long)page);
+        for (i = 0; i < LZO_CMP_PAGES; i++)
+            free_page((unsigned long)page[i]);
+
         return -ENOMEM;
     }
 
@@ -787,6 +801,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
     if (!m)
         m = 1;
     nr_pages = 0;
+    bio = NULL;
     do_gettimeofday(&start);
 
     error = snapshot_write_next(snapshot);
@@ -794,11 +809,11 @@ static int load_image_lzo(struct swap_map_handle *handle,
         goto out_finish;
 
     for (;;) {
-        error = swap_read_page(handle, page, NULL); /* sync */
+        error = swap_read_page(handle, page[0], NULL); /* sync */
         if (error)
             break;
 
-        cmp_len = *(size_t *)page;
+        cmp_len = *(size_t *)page[0];
         if (unlikely(!cmp_len ||
                      cmp_len > lzo1x_worst_compress(LZO_UNC_SIZE))) {
             printk(KERN_ERR "PM: Invalid LZO compressed length\n");
|
||||
break;
|
||||
}
|
||||
|
||||
memcpy(cmp, page, PAGE_SIZE);
|
||||
for (off = PAGE_SIZE; off < LZO_HEADER + cmp_len; off += PAGE_SIZE) {
|
||||
error = swap_read_page(handle, page, NULL); /* sync */
|
||||
for (off = PAGE_SIZE, i = 1;
|
||||
off < LZO_HEADER + cmp_len; off += PAGE_SIZE, i++) {
|
||||
error = swap_read_page(handle, page[i], &bio);
|
||||
if (error)
|
||||
goto out_finish;
|
||||
}
|
||||
|
||||
memcpy(cmp + off, page, PAGE_SIZE);
|
||||
error = hib_wait_on_bio_chain(&bio); /* need all data now */
|
||||
if (error)
|
||||
goto out_finish;
|
||||
|
||||
for (off = 0, i = 0;
|
||||
off < LZO_HEADER + cmp_len; off += PAGE_SIZE, i++) {
|
||||
memcpy(cmp + off, page[i], PAGE_SIZE);
|
||||
}
|
||||
|
||||
unc_len = LZO_UNC_SIZE;
|
||||
@@ -857,7 +879,8 @@ out_finish:
 
     vfree(cmp);
     vfree(unc);
-    free_page((unsigned long)page);
+    for (i = 0; i < LZO_CMP_PAGES; i++)
+        free_page((unsigned long)page[i]);
 
     return error;
 }
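
Editor's sketch (not part of the diff): the load_image_lzo() hunks above switch from reading each compressed page synchronously to reading the first page synchronously (to learn the record length), queueing the rest asynchronously, and waiting on the whole bio chain before assembling the buffer. The helper below condenses that shape; read_record() is an invented name, it reuses the file-local swap_read_page()/hib_wait_on_bio_chain() quoted from the diff, and error paths are simplified (the real code still drains and frees on failure).

/* Illustrative only: sync header read, async body reads, single wait. */
static int read_record(struct swap_map_handle *handle,
                       unsigned char *page[], size_t *len)
{
    struct bio *bio = NULL;
    size_t off, i;
    int error;

    error = swap_read_page(handle, page[0], NULL);     /* sync: header page */
    if (error)
        return error;

    *len = *(size_t *)page[0];                         /* compressed length */
    for (off = PAGE_SIZE, i = 1; off < LZO_HEADER + *len;
         off += PAGE_SIZE, i++) {
        error = swap_read_page(handle, page[i], &bio); /* async, chained */
        if (error)
            return error;
    }

    return hib_wait_on_bio_chain(&bio);                /* need all data now */
}
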
@@ -263,6 +263,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
     case SNAPSHOT_UNFREEZE:
         if (!data->frozen || data->ready)
             break;
+        pm_restore_gfp_mask();
         thaw_processes();
         usermodehelper_enable();
         data->frozen = 0;
@@ -275,6 +276,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
             error = -EPERM;
             break;
         }
+        pm_restore_gfp_mask();
         error = hibernation_snapshot(data->platform_support);
         if (!error)
             error = put_user(in_suspend, (int __user *)arg);
@@ -1082,13 +1082,15 @@ void printk_tick(void)
 
 int printk_needs_cpu(int cpu)
 {
+    if (unlikely(cpu_is_offline(cpu)))
+        printk_tick();
     return per_cpu(printk_pending, cpu);
 }
 
 void wake_up_klogd(void)
 {
     if (waitqueue_active(&log_wait))
-        __raw_get_cpu_var(printk_pending) = 1;
+        this_cpu_write(printk_pending, 1);
 }
 
 /**
@@ -1903,10 +1903,6 @@ static void pull_task(struct rq *src_rq, struct task_struct *p,
     set_task_cpu(p, this_cpu);
     activate_task(this_rq, p, 0);
     check_preempt_curr(this_rq, p, 0);
-
-    /* re-arm NEWIDLE balancing when moving tasks */
-    src_rq->avg_idle = this_rq->avg_idle = 2*sysctl_sched_migration_cost;
-    this_rq->idle_stamp = 0;
 }
 
 /*
@@ -3408,8 +3404,10 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
         interval = msecs_to_jiffies(sd->balance_interval);
         if (time_after(next_balance, sd->last_balance + interval))
             next_balance = sd->last_balance + interval;
-        if (pulled_task)
+        if (pulled_task) {
+            this_rq->idle_stamp = 0;
             break;
+        }
     }
 
     raw_spin_lock(&this_rq->lock);
@@ -1283,6 +1283,8 @@ void trace_dump_stack(void)
     __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
 }
 
+static DEFINE_PER_CPU(int, user_stack_count);
+
 void
 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 {
@@ -1301,6 +1303,18 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
     if (unlikely(in_nmi()))
         return;
 
+    /*
+     * prevent recursion, since the user stack tracing may
+     * trigger other kernel events.
+     */
+    preempt_disable();
+    if (__this_cpu_read(user_stack_count))
+        goto out;
+
+    __this_cpu_inc(user_stack_count);
+
+
+
     event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
                                       sizeof(*entry), flags, pc);
     if (!event)
@@ -1318,6 +1332,11 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
     save_stack_trace_user(&trace);
     if (!filter_check_discard(call, entry, buffer, event))
         ring_buffer_unlock_commit(buffer, event);
+
+    __this_cpu_dec(user_stack_count);
+
+ out:
+    preempt_enable();
 }
 
 #ifdef UNUSED
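
Editor's sketch (not part of the diff): the two tracing hunks above wrap the user-stack capture in a per-CPU recursion guard. The shape of that guard, with invented names ("guard_count", guarded_trace(), do_trace()), is:

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(int, guard_count);

static void do_trace(void)
{
    /* would record the event; may itself call back into guarded_trace() */
}

static void guarded_trace(void)
{
    /*
     * Disable preemption so the per-CPU counter we test and increment
     * belongs to the CPU we stay on for the whole operation.
     */
    preempt_disable();
    if (__this_cpu_read(guard_count))
        goto out;                   /* already tracing on this CPU */

    __this_cpu_inc(guard_count);
    do_trace();
    __this_cpu_dec(guard_count);
out:
    preempt_enable();
}
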