Merge tag 'v3.12-rc4' into sched/core
Merge Linux v3.12-rc4 to fix a conflict and also to refresh the tree before
applying more scheduler patches.

Conflicts:
        arch/avr32/include/asm/Kbuild

Signed-off-by: Ingo Molnar <mingo@kernel.org>
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1117,9 +1117,10 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 
                        sleep_time = timeout_start + audit_backlog_wait_time -
                                        jiffies;
-                       if ((long)sleep_time > 0)
+                       if ((long)sleep_time > 0) {
                                wait_for_auditd(sleep_time);
-                       continue;
+                               continue;
+                       }
                }
                if (audit_rate_check() && printk_ratelimit())
                        printk(KERN_WARNING
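The hunk above narrows the scope of the continue: the loop only restarts after it actually waited, instead of restarting unconditionally and never reaching the rate-limited warning. A minimal, runnable sketch of that control-flow difference (invented values, not the audit backlog logic):

#include <stdio.h>

/* Toy illustration of the braces added above: hypothetical numbers, not
 * the audit code. With the new shape, the loop only retries after it
 * actually waited; once the budget is gone it falls through to the
 * error path instead of spinning forever. */
int main(void)
{
    long budget = 2;

    for (int attempt = 0; attempt < 5; attempt++) {
        long sleep_time = budget - attempt;   /* stand-in for the jiffies math */

        if ((long)sleep_time > 0) {
            printf("attempt %d: waiting %ld ticks\n", attempt, sleep_time);
            continue;   /* the old code continued even when it did not wait */
        }
        printf("attempt %d: budget exhausted, giving up\n", attempt);
        break;
    }
    return 0;
}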
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -50,6 +50,15 @@ void context_tracking_user_enter(void)
 {
        unsigned long flags;
 
+       /*
+        * Repeat the user_enter() check here because some archs may be calling
+        * this from asm and if no CPU needs context tracking, they shouldn't
+        * go further. Repeat the check here until they support the static key
+        * check.
+        */
+       if (!static_key_false(&context_tracking_enabled))
+               return;
+
        /*
         * Some contexts may involve an exception occuring in an irq,
         * leading to that nesting:
@@ -151,6 +160,9 @@ void context_tracking_user_exit(void)
 {
        unsigned long flags;
 
+       if (!static_key_false(&context_tracking_enabled))
+               return;
+
        if (in_interrupt())
                return;
 
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3660,6 +3660,26 @@ static void calc_timer_values(struct perf_event *event,
        *running = ctx_time - event->tstamp_running;
 }
 
+static void perf_event_init_userpage(struct perf_event *event)
+{
+       struct perf_event_mmap_page *userpg;
+       struct ring_buffer *rb;
+
+       rcu_read_lock();
+       rb = rcu_dereference(event->rb);
+       if (!rb)
+               goto unlock;
+
+       userpg = rb->user_page;
+
+       /* Allow new userspace to detect that bit 0 is deprecated */
+       userpg->cap_bit0_is_deprecated = 1;
+       userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
+
+unlock:
+       rcu_read_unlock();
+}
+
 void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
 {
 }
@@ -4044,6 +4064,7 @@ again:
        ring_buffer_attach(event, rb);
        rcu_assign_pointer(event->rb, rb);
 
+       perf_event_init_userpage(event);
        perf_event_update_userpage(event);
 
 unlock:
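The new perf_event_init_userpage() advertises how much of the mmap page the kernel fills in by writing offsetof(..., __reserved) into userpg->size, so newer userspace can probe for fields before reading them. A small userspace sketch of that size-as-ABI-version idea, using a made-up structure rather than the real perf_event_mmap_page:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical shared page, not the real perf mmap page: the producer
 * records how many bytes of the struct it actually provides, and the
 * consumer checks that size before touching a newer field. */
struct shared_page {
    unsigned long version;
    unsigned long size;        /* bytes of this struct the producer fills in */
    unsigned long new_field;   /* only meaningful on newer producers */
    char __reserved[64];
};

int main(void)
{
    struct shared_page pg = {
        .version = 1,
        .size = offsetof(struct shared_page, __reserved),
    };

    if (pg.size >= offsetof(struct shared_page, new_field) + sizeof(pg.new_field))
        printf("new_field is covered by the advertised size\n");
    else
        printf("talking to an older producer, skip new_field\n");
    return 0;
}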
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -571,6 +571,10 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
        DECLARE_COMPLETION_ONSTACK(done);
        int retval = 0;
 
+       if (!sub_info->path) {
+               call_usermodehelper_freeinfo(sub_info);
+               return -EINVAL;
+       }
        helper_lock();
        if (!khelper_wq || usermodehelper_disabled) {
                retval = -EBUSY;
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -254,11 +254,11 @@ int parse_args(const char *doing,
 
 
 STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtoul);
+STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtol);
 STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(int, int, "%i", long, kstrtoul);
+STANDARD_PARAM_DEF(int, int, "%i", long, kstrtol);
 STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(long, long, "%li", long, kstrtoul);
+STANDARD_PARAM_DEF(long, long, "%li", long, kstrtol);
 STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, kstrtoul);
 
 int param_set_charp(const char *val, const struct kernel_param *kp)
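The replacements above switch the signed parameter types (short, int, long) from an unsigned parser to a signed one; the kernel's kstrtoul() does not accept a leading minus sign, so a negative value such as param=-5 could never be set for a signed type. A rough userspace analogy with libc, which behaves differently (strtoul() silently wraps instead of rejecting) but makes the same point:

#include <stdio.h>
#include <stdlib.h>

/* Userspace analogy only: libc strtoul() wraps "-5" around rather than
 * rejecting it the way kstrtoul() does, but either way an unsigned
 * parser is the wrong tool for a signed module parameter. */
int main(void)
{
    const char *arg = "-5";

    long signed_val = strtol(arg, NULL, 0);
    unsigned long unsigned_val = strtoul(arg, NULL, 0);

    printf("strtol(\"%s\")  = %ld\n", arg, signed_val);    /* -5 */
    printf("strtoul(\"%s\") = %lu\n", arg, unsigned_val);  /* huge wrapped value */
    return 0;
}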
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -273,6 +273,11 @@ void free_pid(struct pid *pid)
                         */
                        wake_up_process(ns->child_reaper);
                        break;
+               case PIDNS_HASH_ADDING:
+                       /* Handle a fork failure of the first process */
+                       WARN_ON(ns->child_reaper);
+                       ns->nr_hashed = 0;
+                       /* fall through */
                case 0:
                        schedule_work(&ns->proc_work);
                        break;
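The new PIDNS_HASH_ADDING case does its extra bookkeeping and then deliberately falls through into case 0, so the namespace cleanup work is scheduled on both paths. A trivial standalone sketch of that annotated fall-through shape (hypothetical states, not the pid-namespace code):

#include <stdio.h>

enum ns_state { NS_EMPTY = 0, NS_ADDING_FIRST = 1, NS_RUNNING = 2 };

/* Made-up states: the first case resets its counters and then shares
 * the tail of the next case via an annotated fall-through. */
static void reap(enum ns_state state)
{
    switch (state) {
    case NS_ADDING_FIRST:
        printf("first fork failed, resetting counters\n");
        /* fall through */
    case NS_EMPTY:
        printf("scheduling namespace cleanup\n");
        break;
    default:
        printf("namespace still busy\n");
        break;
    }
}

int main(void)
{
    reap(NS_ADDING_FIRST);
    reap(NS_EMPTY);
    reap(NS_RUNNING);
    return 0;
}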
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -743,7 +743,10 @@ int create_basic_memory_bitmaps(void)
        struct memory_bitmap *bm1, *bm2;
        int error = 0;
 
-       BUG_ON(forbidden_pages_map || free_pages_map);
+       if (forbidden_pages_map && free_pages_map)
+               return 0;
+       else
+               BUG_ON(forbidden_pages_map || free_pages_map);
 
        bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
        if (!bm1)
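Instead of crashing when the bitmaps already exist, create_basic_memory_bitmaps() now treats a repeated call as a successful no-op while still treating a half-initialised state as a bug. A small sketch of that idempotent-create pattern (generic resource, not the hibernation bitmaps):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Generic stand-in for the bitmaps: a second create() call returns
 * success instead of asserting, as long as both halves already exist. */
static char *map_a, *map_b;

static int create_maps(void)
{
    if (map_a && map_b)
        return 0;               /* already set up: nothing to do */
    assert(!map_a && !map_b);   /* half-initialised state is still a bug */

    map_a = malloc(32);
    map_b = malloc(32);
    if (!map_a || !map_b)
        return -1;
    return 0;
}

int main(void)
{
    printf("first create:  %d\n", create_maps());
    printf("second create: %d\n", create_maps());   /* harmless no-op now */
    return 0;
}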
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -39,6 +39,7 @@ static struct snapshot_data {
        char frozen;
        char ready;
        char platform_support;
+       bool free_bitmaps;
 } snapshot_state;
 
 atomic_t snapshot_device_available = ATOMIC_INIT(1);
@@ -82,6 +83,10 @@ static int snapshot_open(struct inode *inode, struct file *filp)
                data->swap = -1;
                data->mode = O_WRONLY;
                error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
+               if (!error) {
+                       error = create_basic_memory_bitmaps();
+                       data->free_bitmaps = !error;
+               }
                if (error)
                        pm_notifier_call_chain(PM_POST_RESTORE);
        }
@@ -111,6 +116,8 @@ static int snapshot_release(struct inode *inode, struct file *filp)
                pm_restore_gfp_mask();
                free_basic_memory_bitmaps();
                thaw_processes();
+       } else if (data->free_bitmaps) {
+               free_basic_memory_bitmaps();
        }
        pm_notifier_call_chain(data->mode == O_RDONLY ?
                        PM_POST_HIBERNATION : PM_POST_RESTORE);
@@ -231,6 +238,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
                        break;
                pm_restore_gfp_mask();
                free_basic_memory_bitmaps();
+               data->free_bitmaps = false;
                thaw_processes();
                data->frozen = 0;
                break;
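The new free_bitmaps flag records whether this open() instance created the bitmaps, so release() frees only what this path actually allocated and the ioctl path clears the flag once it has freed them itself. A standalone sketch of that ownership bookkeeping (generic buffer instead of the memory bitmaps):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Sketch of the free_bitmaps-style bookkeeping above: remember whether
 * open() created the resource, so release() only frees what it owns. */
struct session {
    char *buf;
    bool owns_buf;
};

static int session_open(struct session *s)
{
    s->buf = malloc(128);
    s->owns_buf = (s->buf != NULL);
    return s->buf ? 0 : -1;
}

static void session_release(struct session *s)
{
    if (s->owns_buf) {
        free(s->buf);
        s->buf = NULL;
        s->owns_buf = false;
    }
}

int main(void)
{
    struct session s = { 0 };

    if (session_open(&s) == 0)
        printf("allocated, will free on release\n");
    session_release(&s);
    session_release(&s);   /* second release is a safe no-op */
    return 0;
}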
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -32,7 +32,14 @@ EXPORT_SYMBOL(cad_pid);
 #endif
 enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
 
-int reboot_default;
+/*
+ * This variable is used privately to keep track of whether or not
+ * reboot_type is still set to its default value (i.e., reboot= hasn't
+ * been set on the command line). This is needed so that we can
+ * suppress DMI scanning for reboot quirks. Without it, it's
+ * impossible to override a faulty reboot quirk without recompiling.
+ */
+int reboot_default = 1;
 int reboot_cpu;
 enum reboot_type reboot_type = BOOT_ACPI;
 int reboot_force;
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -328,10 +328,19 @@ void irq_enter(void)
 
 static inline void invoke_softirq(void)
 {
-       if (!force_irqthreads)
-               __do_softirq();
-       else
+       if (!force_irqthreads) {
+               /*
+                * We can safely execute softirq on the current stack if
+                * it is the irq stack, because it should be near empty
+                * at this stage. But we have no way to know if the arch
+                * calls irq_exit() on the irq stack. So call softirq
+                * in its own stack to prevent from any overrun on top
+                * of a potentially deep task stack.
+                */
+               do_softirq();
+       } else {
                wakeup_softirqd();
+       }
 }
 
 static inline void tick_irq_exit(void)
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -516,13 +516,13 @@ static void sync_cmos_clock(struct work_struct *work)
        schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next));
 }
 
-static void notify_cmos_timer(void)
+void ntp_notify_cmos_timer(void)
 {
        schedule_delayed_work(&sync_cmos_work, 0);
 }
 
 #else
-static inline void notify_cmos_timer(void) { }
+void ntp_notify_cmos_timer(void) { }
 #endif
 
 
@@ -687,8 +687,6 @@ int __do_adjtimex(struct timex *txc, struct timespec *ts, s32 *time_tai)
        if (!(time_status & STA_NANO))
                txc->time.tv_usec /= NSEC_PER_USEC;
 
-       notify_cmos_timer();
-
        return result;
 }
 
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1703,6 +1703,8 @@ int do_adjtimex(struct timex *txc)
        write_seqcount_end(&timekeeper_seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
+       ntp_notify_cmos_timer();
+
        return ret;
 }
 
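Taken together, the two hunks above move the CMOS-sync trigger out of __do_adjtimex() and into do_adjtimex(), after the timekeeper lock and seqcount have been released, so the deferred work is queued with no timekeeping locks held. A hedged userspace sketch of that "notify only after dropping the lock" pattern (generic worker, not the timekeeping code):

#include <pthread.h>
#include <stdio.h>

/* Sketch of the pattern the ntp_notify_cmos_timer() move follows: queue
 * follow-up work only once the hot lock is released, so the callee can
 * never re-enter that lock underneath us. */
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static void queue_followup_work(void)
{
    /* In the kernel this is schedule_delayed_work(); here just a stand-in. */
    printf("follow-up work queued\n");
}

static int update_state(int new_value)
{
    int ret;

    pthread_mutex_lock(&state_lock);
    ret = new_value;              /* mutate protected state */
    pthread_mutex_unlock(&state_lock);

    queue_followup_work();        /* safe: no locks held here */
    return ret;
}

int main(void)
{
    printf("update returned %d\n", update_state(42));
    return 0;
}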
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -486,7 +486,52 @@ static struct smp_hotplug_thread watchdog_threads = {
        .unpark         = watchdog_enable,
 };
 
-static int watchdog_enable_all_cpus(void)
+static void restart_watchdog_hrtimer(void *info)
+{
+       struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
+       int ret;
+
+       /*
+        * No need to cancel and restart hrtimer if it is currently executing
+        * because it will reprogram itself with the new period now.
+        * We should never see it unqueued here because we are running per-cpu
+        * with interrupts disabled.
+        */
+       ret = hrtimer_try_to_cancel(hrtimer);
+       if (ret == 1)
+               hrtimer_start(hrtimer, ns_to_ktime(sample_period),
+                               HRTIMER_MODE_REL_PINNED);
+}
+
+static void update_timers(int cpu)
+{
+       struct call_single_data data = {.func = restart_watchdog_hrtimer};
+       /*
+        * Make sure that perf event counter will adopt to a new
+        * sampling period. Updating the sampling period directly would
+        * be much nicer but we do not have an API for that now so
+        * let's use a big hammer.
+        * Hrtimer will adopt the new period on the next tick but this
+        * might be late already so we have to restart the timer as well.
+        */
+       watchdog_nmi_disable(cpu);
+       __smp_call_function_single(cpu, &data, 1);
+       watchdog_nmi_enable(cpu);
+}
+
+static void update_timers_all_cpus(void)
+{
+       int cpu;
+
+       get_online_cpus();
+       preempt_disable();
+       for_each_online_cpu(cpu)
+               update_timers(cpu);
+       preempt_enable();
+       put_online_cpus();
+}
+
+static int watchdog_enable_all_cpus(bool sample_period_changed)
 {
        int err = 0;
 
@@ -496,6 +541,8 @@ static int watchdog_enable_all_cpus(void)
                        pr_err("Failed to create watchdog threads, disabled\n");
                else
                        watchdog_running = 1;
+       } else if (sample_period_changed) {
+               update_timers_all_cpus();
        }
 
        return err;
@@ -520,13 +567,15 @@ int proc_dowatchdog(struct ctl_table *table, int write,
                    void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        int err, old_thresh, old_enabled;
+       static DEFINE_MUTEX(watchdog_proc_mutex);
 
+       mutex_lock(&watchdog_proc_mutex);
        old_thresh = ACCESS_ONCE(watchdog_thresh);
        old_enabled = ACCESS_ONCE(watchdog_user_enabled);
 
        err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        if (err || !write)
-               return err;
+               goto out;
 
        set_sample_period();
        /*
@@ -535,7 +584,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
         * watchdog_*_all_cpus() function takes care of this.
         */
        if (watchdog_user_enabled && watchdog_thresh)
-               err = watchdog_enable_all_cpus();
+               err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
        else
                watchdog_disable_all_cpus();
 
@@ -544,7 +593,8 @@ int proc_dowatchdog(struct ctl_table *table, int write,
                watchdog_thresh = old_thresh;
                watchdog_user_enabled = old_enabled;
        }
-
+out:
+       mutex_unlock(&watchdog_proc_mutex);
        return err;
 }
 #endif /* CONFIG_SYSCTL */
@@ -554,5 +604,5 @@ void __init lockup_detector_init(void)
        set_sample_period();
 
        if (watchdog_user_enabled)
-               watchdog_enable_all_cpus();
+               watchdog_enable_all_cpus(false);
 }
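In proc_dowatchdog() above, the early "return err" becomes "goto out" so that every exit path, including the read-only and error cases, runs through the single mutex_unlock() site for the new watchdog_proc_mutex. A small standalone sketch of that single-exit locking idiom (hypothetical handler, not the watchdog code):

#include <pthread.h>
#include <stdio.h>

/* Sketch of the "goto out" error-path idiom: every return path goes
 * through the one unlock site, so an early error cannot leak the lock. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int handle(int write_requested)
{
    int err = 0;

    pthread_mutex_lock(&lock);

    if (!write_requested) {
        err = -1;          /* early exit, but still unlocks below */
        goto out;
    }

    printf("applying new settings\n");
out:
    pthread_mutex_unlock(&lock);
    return err;
}

int main(void)
{
    printf("read-only call -> %d\n", handle(0));
    printf("write call     -> %d\n", handle(1));
    return 0;
}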