Merge branch 'akpm' (incoming from Andrew)
Merge misc patches from Andrew Morton:

 - Florian has vanished so I appear to have become fbdev maintainer
   again :(

 - Joel and Mark are distracted so welcome to the new OCFS2 maintainer

 - The backlight queue

 - Small core kernel changes

 - lib/ updates

 - The rtc queue

 - Various random bits

* akpm: (164 commits)
  rtc: rtc-davinci: use devm_*() functions
  rtc: rtc-max8997: use devm_request_threaded_irq()
  rtc: rtc-max8907: use devm_request_threaded_irq()
  rtc: rtc-da9052: use devm_request_threaded_irq()
  rtc: rtc-wm831x: use devm_request_threaded_irq()
  rtc: rtc-tps80031: use devm_request_threaded_irq()
  rtc: rtc-lp8788: use devm_request_threaded_irq()
  rtc: rtc-coh901331: use devm_clk_get()
  rtc: rtc-vt8500: use devm_*() functions
  rtc: rtc-tps6586x: use devm_request_threaded_irq()
  rtc: rtc-imxdi: use devm_clk_get()
  rtc: rtc-cmos: use dev_warn()/dev_dbg() instead of printk()/pr_debug()
  rtc: rtc-pcf8583: use dev_warn() instead of printk()
  rtc: rtc-sun4v: use pr_warn() instead of printk()
  rtc: rtc-vr41xx: use dev_info() instead of printk()
  rtc: rtc-rs5c313: use pr_err() instead of printk()
  rtc: rtc-at91rm9200: use dev_dbg()/dev_err() instead of printk()/pr_debug()
  rtc: rtc-rs5c372: use dev_dbg()/dev_warn() instead of printk()/pr_debug()
  rtc: rtc-ds2404: use dev_err() instead of printk()
  rtc: rtc-efi: use dev_err()/dev_warn()/pr_err() instead of printk()
  ...
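Most of the rtc conversions listed above follow one mechanical pattern. As a rough illustration (not taken from any of the patches; "foo_rtc" and its handler are made up), a probe() that uses devm_request_threaded_irq() can drop the matching free_irq() from its error paths and from remove():

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t foo_rtc_irq_thread(int irq, void *dev_id)
{
	/* acknowledge the alarm interrupt, notify the rtc core, etc. */
	return IRQ_HANDLED;
}

static int foo_rtc_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	/* the IRQ is released automatically when the device is unbound */
	return devm_request_threaded_irq(&pdev->dev, irq, NULL,
					 foo_rtc_irq_thread, IRQF_ONESHOT,
					 "foo-rtc", pdev);
}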
@@ -593,7 +593,7 @@ COMPAT_SYSCALL_DEFINE5(waitid,
 		else
 			ret = put_compat_rusage(&ru, uru);
 		if (ret)
-			return ret;
+			return -EFAULT;
 	}
 
 	BUG_ON(info.si_code & __SI_MASK);
@@ -153,8 +153,7 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk)
 		goto out;
 	}
 
-	new_ns = create_new_namespaces(flags, tsk,
-				       task_cred_xxx(tsk, user_ns), tsk->fs);
+	new_ns = create_new_namespaces(flags, tsk, user_ns, tsk->fs);
 	if (IS_ERR(new_ns)) {
 		err = PTR_ERR(new_ns);
 		goto out;
kernel/smp.c
@@ -16,22 +16,12 @@
 #include "smpboot.h"
 
 #ifdef CONFIG_USE_GENERIC_SMP_HELPERS
-static struct {
-	struct list_head	queue;
-	raw_spinlock_t		lock;
-} call_function __cacheline_aligned_in_smp =
-	{
-		.queue		= LIST_HEAD_INIT(call_function.queue),
-		.lock		= __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
-	};
-
 enum {
 	CSD_FLAG_LOCK		= 0x01,
 };
 
 struct call_function_data {
-	struct call_single_data	csd;
-	atomic_t		refs;
+	struct call_single_data	__percpu *csd;
 	cpumask_var_t		cpumask;
 	cpumask_var_t		cpumask_ipi;
 };
@@ -60,6 +50,11 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
 				cpu_to_node(cpu)))
 			return notifier_from_errno(-ENOMEM);
+		cfd->csd = alloc_percpu(struct call_single_data);
+		if (!cfd->csd) {
+			free_cpumask_var(cfd->cpumask);
+			return notifier_from_errno(-ENOMEM);
+		}
 		break;
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -70,6 +65,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	case CPU_DEAD_FROZEN:
 		free_cpumask_var(cfd->cpumask);
 		free_cpumask_var(cfd->cpumask_ipi);
+		free_percpu(cfd->csd);
 		break;
 #endif
 	};
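The hotplug path above leans on the percpu allocator. A minimal sketch of that allocation lifecycle (illustrative only; percpu_csd_demo() is not part of the patch):

#include <linux/percpu.h>
#include <linux/smp.h>

static int percpu_csd_demo(void)
{
	struct call_single_data __percpu *csd;
	int cpu;

	csd = alloc_percpu(struct call_single_data);	/* one copy per possible CPU */
	if (!csd)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		per_cpu_ptr(csd, cpu)->info = NULL;	/* address one CPU's copy */

	free_percpu(csd);	/* releases every per-CPU copy at once */
	return 0;
}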
@@ -170,85 +166,6 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
 		csd_lock_wait(data);
 }
 
-/*
- * Invoked by arch to handle an IPI for call function. Must be called with
- * interrupts disabled.
- */
-void generic_smp_call_function_interrupt(void)
-{
-	struct call_function_data *data;
-	int cpu = smp_processor_id();
-
-	/*
-	 * Shouldn't receive this interrupt on a cpu that is not yet online.
-	 */
-	WARN_ON_ONCE(!cpu_online(cpu));
-
-	/*
-	 * Ensure entry is visible on call_function_queue after we have
-	 * entered the IPI. See comment in smp_call_function_many.
-	 * If we don't have this, then we may miss an entry on the list
-	 * and never get another IPI to process it.
-	 */
-	smp_mb();
-
-	/*
-	 * It's ok to use list_for_each_rcu() here even though we may
-	 * delete 'pos', since list_del_rcu() doesn't clear ->next
-	 */
-	list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
-		int refs;
-		smp_call_func_t func;
-
-		/*
-		 * Since we walk the list without any locks, we might
-		 * see an entry that was completed, removed from the
-		 * list and is in the process of being reused.
-		 *
-		 * We must check that the cpu is in the cpumask before
-		 * checking the refs, and both must be set before
-		 * executing the callback on this cpu.
-		 */
-
-		if (!cpumask_test_cpu(cpu, data->cpumask))
-			continue;
-
-		smp_rmb();
-
-		if (atomic_read(&data->refs) == 0)
-			continue;
-
-		func = data->csd.func;		/* save for later warn */
-		func(data->csd.info);
-
-		/*
-		 * If the cpu mask is not still set then func enabled
-		 * interrupts (BUG), and this cpu took another smp call
-		 * function interrupt and executed func(info) twice
-		 * on this cpu.  That nested execution decremented refs.
-		 */
-		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) {
-			WARN(1, "%pf enabled interrupts and double executed\n", func);
-			continue;
-		}
-
-		refs = atomic_dec_return(&data->refs);
-		WARN_ON(refs < 0);
-
-		if (refs)
-			continue;
-
-		WARN_ON(!cpumask_empty(data->cpumask));
-
-		raw_spin_lock(&call_function.lock);
-		list_del_rcu(&data->csd.list);
-		raw_spin_unlock(&call_function.lock);
-
-		csd_unlock(&data->csd);
-	}
-
-}
-
 /*
  * Invoked by arch to handle an IPI for call function single. Must be
  * called from the arch with interrupts disabled.
@@ -453,8 +370,7 @@ void smp_call_function_many(const struct cpumask *mask,
 			     smp_call_func_t func, void *info, bool wait)
 {
 	struct call_function_data *data;
-	unsigned long flags;
-	int refs, cpu, next_cpu, this_cpu = smp_processor_id();
+	int cpu, next_cpu, this_cpu = smp_processor_id();
 
 	/*
 	 * Can deadlock when called with interrupts disabled.
@@ -486,50 +402,13 @@ void smp_call_function_many(const struct cpumask *mask,
 	}
 
 	data = &__get_cpu_var(cfd_data);
-	csd_lock(&data->csd);
-
-	/* This BUG_ON verifies our reuse assertions and can be removed */
-	BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));
-
-	/*
-	 * The global call function queue list add and delete are protected
-	 * by a lock, but the list is traversed without any lock, relying
-	 * on the rcu list add and delete to allow safe concurrent traversal.
-	 * We reuse the call function data without waiting for any grace
-	 * period after some other cpu removes it from the global queue.
-	 * This means a cpu might find our data block as it is being
-	 * filled out.
-	 *
-	 * We hold off the interrupt handler on the other cpu by
-	 * ordering our writes to the cpu mask vs our setting of the
-	 * refs counter.  We assert only the cpu owning the data block
-	 * will set a bit in cpumask, and each bit will only be cleared
-	 * by the subject cpu.  Each cpu must first find its bit is
-	 * set and then check that refs is set indicating the element is
-	 * ready to be processed, otherwise it must skip the entry.
-	 *
-	 * On the previous iteration refs was set to 0 by another cpu.
-	 * To avoid the use of transitivity, set the counter to 0 here
-	 * so the wmb will pair with the rmb in the interrupt handler.
-	 */
-	atomic_set(&data->refs, 0);	/* convert 3rd to 1st party write */
-
-	data->csd.func = func;
-	data->csd.info = info;
-
-	/* Ensure 0 refs is visible before mask.  Also orders func and info */
-	smp_wmb();
 
-	/* We rely on the "and" being processed before the store */
 	cpumask_and(data->cpumask, mask, cpu_online_mask);
 	cpumask_clear_cpu(this_cpu, data->cpumask);
-	refs = cpumask_weight(data->cpumask);
 
 	/* Some callers race with other cpus changing the passed mask */
-	if (unlikely(!refs)) {
-		csd_unlock(&data->csd);
+	if (unlikely(!cpumask_weight(data->cpumask)))
 		return;
-	}
 
 	/*
 	 * After we put an entry into the list, data->cpumask
@@ -537,34 +416,32 @@ void smp_call_function_many(const struct cpumask *mask,
 	 * a SMP function call, so data->cpumask will be zero.
 	 */
 	cpumask_copy(data->cpumask_ipi, data->cpumask);
-	raw_spin_lock_irqsave(&call_function.lock, flags);
-	/*
-	 * Place entry at the _HEAD_ of the list, so that any cpu still
-	 * observing the entry in generic_smp_call_function_interrupt()
-	 * will not miss any other list entries:
-	 */
-	list_add_rcu(&data->csd.list, &call_function.queue);
-	/*
-	 * We rely on the wmb() in list_add_rcu to complete our writes
-	 * to the cpumask before this write to refs, which indicates
-	 * data is on the list and is ready to be processed.
-	 */
-	atomic_set(&data->refs, refs);
-	raw_spin_unlock_irqrestore(&call_function.lock, flags);
 
-	/*
-	 * Make the list addition visible before sending the ipi.
-	 * (IPIs must obey or appear to obey normal Linux cache
-	 * coherency rules -- see comment in generic_exec_single).
-	 */
-	smp_mb();
+	for_each_cpu(cpu, data->cpumask) {
+		struct call_single_data *csd = per_cpu_ptr(data->csd, cpu);
+		struct call_single_queue *dst =
+					&per_cpu(call_single_queue, cpu);
+		unsigned long flags;
+
+		csd_lock(csd);
+		csd->func = func;
+		csd->info = info;
+
+		raw_spin_lock_irqsave(&dst->lock, flags);
+		list_add_tail(&csd->list, &dst->list);
+		raw_spin_unlock_irqrestore(&dst->lock, flags);
+	}
 
 	/* Send a message to all CPUs in the map */
 	arch_send_call_function_ipi_mask(data->cpumask_ipi);
 
-	/* Optionally wait for the CPUs to complete */
-	if (wait)
-		csd_lock_wait(&data->csd);
+	if (wait) {
+		for_each_cpu(cpu, data->cpumask) {
+			struct call_single_data *csd =
+					per_cpu_ptr(data->csd, cpu);
+			csd_lock_wait(csd);
+		}
+	}
 }
 EXPORT_SYMBOL(smp_call_function_many);
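For context, a hedged sketch of how a caller uses this function; drain_remote_caches() is invented for illustration, and the calling constraints (preemption disabled, callback runs from the IPI handler with interrupts off) are unchanged by the patch:

#include <linux/smp.h>

static void drain_remote_caches(void *info)
{
	/* runs on each target CPU from IPI context */
}

static void drain_all_cpus(void)
{
	get_cpu();	/* smp_call_function_many() requires preemption disabled */
	smp_call_function_many(cpu_online_mask, drain_remote_caches,
			       NULL, true);	/* wait == true: block until all CPUs finish */
	put_cpu();
}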
kernel/sys.c
@@ -47,6 +47,7 @@
 #include <linux/syscalls.h>
 #include <linux/kprobes.h>
 #include <linux/user_namespace.h>
+#include <linux/binfmts.h>
 
 #include <linux/kmsg_dump.h>
 /* Move somewhere else to avoid recompiling? */
@@ -2012,160 +2013,159 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 
 	error = 0;
 	switch (option) {
-		case PR_SET_PDEATHSIG:
-			if (!valid_signal(arg2)) {
-				error = -EINVAL;
-				break;
-			}
-			me->pdeath_signal = arg2;
-			break;
-		case PR_GET_PDEATHSIG:
-			error = put_user(me->pdeath_signal, (int __user *)arg2);
-			break;
-		case PR_GET_DUMPABLE:
-			error = get_dumpable(me->mm);
-			break;
-		case PR_SET_DUMPABLE:
-			if (arg2 < 0 || arg2 > 1) {
-				error = -EINVAL;
-				break;
-			}
-			set_dumpable(me->mm, arg2);
-			break;
-
-		case PR_SET_UNALIGN:
-			error = SET_UNALIGN_CTL(me, arg2);
-			break;
-		case PR_GET_UNALIGN:
-			error = GET_UNALIGN_CTL(me, arg2);
-			break;
-		case PR_SET_FPEMU:
-			error = SET_FPEMU_CTL(me, arg2);
-			break;
-		case PR_GET_FPEMU:
-			error = GET_FPEMU_CTL(me, arg2);
-			break;
-		case PR_SET_FPEXC:
-			error = SET_FPEXC_CTL(me, arg2);
-			break;
-		case PR_GET_FPEXC:
-			error = GET_FPEXC_CTL(me, arg2);
-			break;
-		case PR_GET_TIMING:
-			error = PR_TIMING_STATISTICAL;
-			break;
-		case PR_SET_TIMING:
-			if (arg2 != PR_TIMING_STATISTICAL)
-				error = -EINVAL;
-			break;
-		case PR_SET_NAME:
-			comm[sizeof(me->comm)-1] = 0;
-			if (strncpy_from_user(comm, (char __user *)arg2,
-					      sizeof(me->comm) - 1) < 0)
-				return -EFAULT;
-			set_task_comm(me, comm);
-			proc_comm_connector(me);
-			break;
-		case PR_GET_NAME:
-			get_task_comm(comm, me);
-			if (copy_to_user((char __user *)arg2, comm,
-					 sizeof(comm)))
-				return -EFAULT;
-			break;
-		case PR_GET_ENDIAN:
-			error = GET_ENDIAN(me, arg2);
-			break;
-		case PR_SET_ENDIAN:
-			error = SET_ENDIAN(me, arg2);
-			break;
-		case PR_GET_SECCOMP:
-			error = prctl_get_seccomp();
-			break;
-		case PR_SET_SECCOMP:
-			error = prctl_set_seccomp(arg2, (char __user *)arg3);
-			break;
-		case PR_GET_TSC:
-			error = GET_TSC_CTL(arg2);
-			break;
-		case PR_SET_TSC:
-			error = SET_TSC_CTL(arg2);
-			break;
-		case PR_TASK_PERF_EVENTS_DISABLE:
-			error = perf_event_task_disable();
-			break;
-		case PR_TASK_PERF_EVENTS_ENABLE:
-			error = perf_event_task_enable();
-			break;
-		case PR_GET_TIMERSLACK:
-			error = current->timer_slack_ns;
-			break;
-		case PR_SET_TIMERSLACK:
-			if (arg2 <= 0)
-				current->timer_slack_ns =
-					current->default_timer_slack_ns;
-			else
-				current->timer_slack_ns = arg2;
-			break;
-		case PR_MCE_KILL:
-			if (arg4 | arg5)
-				return -EINVAL;
-			switch (arg2) {
-			case PR_MCE_KILL_CLEAR:
-				if (arg3 != 0)
-					return -EINVAL;
-				current->flags &= ~PF_MCE_PROCESS;
-				break;
-			case PR_MCE_KILL_SET:
-				current->flags |= PF_MCE_PROCESS;
-				if (arg3 == PR_MCE_KILL_EARLY)
-					current->flags |= PF_MCE_EARLY;
-				else if (arg3 == PR_MCE_KILL_LATE)
-					current->flags &= ~PF_MCE_EARLY;
-				else if (arg3 == PR_MCE_KILL_DEFAULT)
-					current->flags &=
-						~(PF_MCE_EARLY|PF_MCE_PROCESS);
-				else
-					return -EINVAL;
-				break;
-			default:
-				return -EINVAL;
-			}
-			break;
-		case PR_MCE_KILL_GET:
-			if (arg2 | arg3 | arg4 | arg5)
-				return -EINVAL;
-			if (current->flags & PF_MCE_PROCESS)
-				error = (current->flags & PF_MCE_EARLY) ?
-					PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
-			else
-				error = PR_MCE_KILL_DEFAULT;
-			break;
-		case PR_SET_MM:
-			error = prctl_set_mm(arg2, arg3, arg4, arg5);
-			break;
-		case PR_GET_TID_ADDRESS:
-			error = prctl_get_tid_address(me, (int __user **)arg2);
-			break;
-		case PR_SET_CHILD_SUBREAPER:
-			me->signal->is_child_subreaper = !!arg2;
-			break;
-		case PR_GET_CHILD_SUBREAPER:
-			error = put_user(me->signal->is_child_subreaper,
-					 (int __user *) arg2);
-			break;
-		case PR_SET_NO_NEW_PRIVS:
-			if (arg2 != 1 || arg3 || arg4 || arg5)
-				return -EINVAL;
-
-			current->no_new_privs = 1;
-			break;
-		case PR_GET_NO_NEW_PRIVS:
-			if (arg2 || arg3 || arg4 || arg5)
-				return -EINVAL;
-			return current->no_new_privs ? 1 : 0;
-		default:
+	case PR_SET_PDEATHSIG:
+		if (!valid_signal(arg2)) {
+			error = -EINVAL;
+			break;
+		}
+		me->pdeath_signal = arg2;
+		break;
+	case PR_GET_PDEATHSIG:
+		error = put_user(me->pdeath_signal, (int __user *)arg2);
+		break;
+	case PR_GET_DUMPABLE:
+		error = get_dumpable(me->mm);
+		break;
+	case PR_SET_DUMPABLE:
+		if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
+			error = -EINVAL;
+			break;
+		}
+		set_dumpable(me->mm, arg2);
+		break;
+
+	case PR_SET_UNALIGN:
+		error = SET_UNALIGN_CTL(me, arg2);
+		break;
+	case PR_GET_UNALIGN:
+		error = GET_UNALIGN_CTL(me, arg2);
+		break;
+	case PR_SET_FPEMU:
+		error = SET_FPEMU_CTL(me, arg2);
+		break;
+	case PR_GET_FPEMU:
+		error = GET_FPEMU_CTL(me, arg2);
+		break;
+	case PR_SET_FPEXC:
+		error = SET_FPEXC_CTL(me, arg2);
+		break;
+	case PR_GET_FPEXC:
+		error = GET_FPEXC_CTL(me, arg2);
+		break;
+	case PR_GET_TIMING:
+		error = PR_TIMING_STATISTICAL;
+		break;
+	case PR_SET_TIMING:
+		if (arg2 != PR_TIMING_STATISTICAL)
+			error = -EINVAL;
+		break;
+	case PR_SET_NAME:
+		comm[sizeof(me->comm) - 1] = 0;
+		if (strncpy_from_user(comm, (char __user *)arg2,
+				      sizeof(me->comm) - 1) < 0)
+			return -EFAULT;
+		set_task_comm(me, comm);
+		proc_comm_connector(me);
+		break;
+	case PR_GET_NAME:
+		get_task_comm(comm, me);
+		if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
+			return -EFAULT;
+		break;
+	case PR_GET_ENDIAN:
+		error = GET_ENDIAN(me, arg2);
+		break;
+	case PR_SET_ENDIAN:
+		error = SET_ENDIAN(me, arg2);
+		break;
+	case PR_GET_SECCOMP:
+		error = prctl_get_seccomp();
+		break;
+	case PR_SET_SECCOMP:
+		error = prctl_set_seccomp(arg2, (char __user *)arg3);
+		break;
+	case PR_GET_TSC:
+		error = GET_TSC_CTL(arg2);
+		break;
+	case PR_SET_TSC:
+		error = SET_TSC_CTL(arg2);
+		break;
+	case PR_TASK_PERF_EVENTS_DISABLE:
+		error = perf_event_task_disable();
+		break;
+	case PR_TASK_PERF_EVENTS_ENABLE:
+		error = perf_event_task_enable();
+		break;
+	case PR_GET_TIMERSLACK:
+		error = current->timer_slack_ns;
+		break;
+	case PR_SET_TIMERSLACK:
+		if (arg2 <= 0)
+			current->timer_slack_ns =
+					current->default_timer_slack_ns;
+		else
+			current->timer_slack_ns = arg2;
+		break;
+	case PR_MCE_KILL:
+		if (arg4 | arg5)
+			return -EINVAL;
+		switch (arg2) {
+		case PR_MCE_KILL_CLEAR:
+			if (arg3 != 0)
+				return -EINVAL;
+			current->flags &= ~PF_MCE_PROCESS;
+			break;
+		case PR_MCE_KILL_SET:
+			current->flags |= PF_MCE_PROCESS;
+			if (arg3 == PR_MCE_KILL_EARLY)
+				current->flags |= PF_MCE_EARLY;
+			else if (arg3 == PR_MCE_KILL_LATE)
+				current->flags &= ~PF_MCE_EARLY;
+			else if (arg3 == PR_MCE_KILL_DEFAULT)
+				current->flags &=
+						~(PF_MCE_EARLY|PF_MCE_PROCESS);
+			else
+				return -EINVAL;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case PR_MCE_KILL_GET:
+		if (arg2 | arg3 | arg4 | arg5)
+			return -EINVAL;
+		if (current->flags & PF_MCE_PROCESS)
+			error = (current->flags & PF_MCE_EARLY) ?
+				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
+		else
+			error = PR_MCE_KILL_DEFAULT;
+		break;
+	case PR_SET_MM:
+		error = prctl_set_mm(arg2, arg3, arg4, arg5);
+		break;
+	case PR_GET_TID_ADDRESS:
+		error = prctl_get_tid_address(me, (int __user **)arg2);
+		break;
+	case PR_SET_CHILD_SUBREAPER:
+		me->signal->is_child_subreaper = !!arg2;
+		break;
+	case PR_GET_CHILD_SUBREAPER:
+		error = put_user(me->signal->is_child_subreaper,
+				 (int __user *)arg2);
+		break;
+	case PR_SET_NO_NEW_PRIVS:
+		if (arg2 != 1 || arg3 || arg4 || arg5)
+			return -EINVAL;
+
+		current->no_new_privs = 1;
+		break;
+	case PR_GET_NO_NEW_PRIVS:
+		if (arg2 || arg3 || arg4 || arg5)
+			return -EINVAL;
+		return current->no_new_privs ? 1 : 0;
+	default:
 		error = -EINVAL;
 		break;
 	}
 	return error;
 }
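From userspace the reindented switch behaves the same; the one visible change is PR_SET_DUMPABLE now rejecting everything except 0 and 1 (SUID_DUMP_DISABLE and SUID_DUMP_USER in kernel terms). A small sketch, not part of the patch:

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	char comm[16];	/* TASK_COMM_LEN */

	prctl(PR_SET_NAME, "a-rather-long-name");	/* truncated to 15 chars + NUL */
	prctl(PR_GET_NAME, comm);
	printf("comm: %s\n", comm);

	if (prctl(PR_SET_DUMPABLE, 2) == -1)	/* rejected: only 0 or 1 accepted */
		perror("PR_SET_DUMPABLE");
	return 0;
}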
@@ -240,7 +240,7 @@ EXPORT_SYMBOL(current_fs_time);
  * Avoid unnecessary multiplications/divisions in the
  * two most common HZ cases:
  */
-inline unsigned int jiffies_to_msecs(const unsigned long j)
+unsigned int jiffies_to_msecs(const unsigned long j)
 {
 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
 	return (MSEC_PER_SEC / HZ) * j;
@@ -256,7 +256,7 @@ inline unsigned int jiffies_to_msecs(const unsigned long j)
 }
 EXPORT_SYMBOL(jiffies_to_msecs);
 
-inline unsigned int jiffies_to_usecs(const unsigned long j)
+unsigned int jiffies_to_usecs(const unsigned long j)
 {
 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
 	return (USEC_PER_SEC / HZ) * j;
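Dropping "inline" from these exported functions changes no caller-visible behaviour; the fast paths still reduce to a constant multiply. A worked example under the common HZ=1000 and HZ=100 configs (the helper below is hypothetical):

#include <linux/jiffies.h>

static bool waited_200ms(unsigned long start)
{
	/*
	 * HZ=1000: MSEC_PER_SEC/HZ == 1, so jiffies_to_msecs(j) == j.
	 * HZ=100:  MSEC_PER_SEC/HZ == 10, so jiffies_to_msecs(j) == 10 * j.
	 */
	return jiffies_to_msecs(jiffies - start) >= 200;
}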