Merge branch 'linus' into core/printk
Conflicts:
	kernel/printk.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
@@ -738,7 +738,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
if (!audit_enabled && msg_type != AUDIT_USER_AVC)
return 0;

err = audit_filter_user(&NETLINK_CB(skb), msg_type);
err = audit_filter_user(&NETLINK_CB(skb));
if (err == 1) {
err = 0;
if (msg_type == AUDIT_USER_TTY) {
@@ -779,7 +779,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
}
/* fallthrough */
case AUDIT_LIST:
err = audit_receive_filter(nlh->nlmsg_type, NETLINK_CB(skb).pid,
err = audit_receive_filter(msg_type, NETLINK_CB(skb).pid,
uid, seq, data, nlmsg_len(nlh),
loginuid, sessionid, sid);
break;
@@ -798,7 +798,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
}
/* fallthrough */
case AUDIT_LIST_RULES:
err = audit_receive_filter(nlh->nlmsg_type, NETLINK_CB(skb).pid,
err = audit_receive_filter(msg_type, NETLINK_CB(skb).pid,
uid, seq, data, nlmsg_len(nlh),
loginuid, sessionid, sid);
break;

@@ -1544,6 +1544,7 @@ static void audit_log_rule_change(uid_t loginuid, u32 sessionid, u32 sid,
* @data: payload data
* @datasz: size of payload data
* @loginuid: loginuid of sender
* @sessionid: sessionid for netlink audit message
* @sid: SE Linux Security ID of sender
*/
int audit_receive_filter(int type, int pid, int uid, int seq, void *data,
@@ -1720,7 +1721,7 @@ static int audit_filter_user_rules(struct netlink_skb_parms *cb,
return 1;
}

int audit_filter_user(struct netlink_skb_parms *cb, int type)
int audit_filter_user(struct netlink_skb_parms *cb)
{
enum audit_state state = AUDIT_DISABLED;
struct audit_entry *e;

@@ -52,12 +52,96 @@ static void warn_legacy_capability_use(void)
}
}

/*
* Version 2 capabilities worked fine, but the linux/capability.h file
* that accompanied their introduction encouraged their use without
* the necessary user-space source code changes. As such, we have
* created a version 3 with equivalent functionality to version 2, but
* with a header change to protect legacy source code from using
* version 2 when it wanted to use version 1. If your system has code
* that trips the following warning, it is using version 2 specific
* capabilities and may be doing so insecurely.
*
* The remedy is to either upgrade your version of libcap (to 2.10+,
* if the application is linked against it), or recompile your
* application with modern kernel headers and this warning will go
* away.
*/

static void warn_deprecated_v2(void)
{
static int warned;

if (!warned) {
char name[sizeof(current->comm)];

printk(KERN_INFO "warning: `%s' uses deprecated v2"
" capabilities in a way that may be insecure.\n",
get_task_comm(name, current));
warned = 1;
}
}

/*
* Version check. Return the number of u32s in each capability flag
* array, or a negative value on error.
*/
static int cap_validate_magic(cap_user_header_t header, unsigned *tocopy)
{
__u32 version;

if (get_user(version, &header->version))
return -EFAULT;

switch (version) {
case _LINUX_CAPABILITY_VERSION_1:
warn_legacy_capability_use();
*tocopy = _LINUX_CAPABILITY_U32S_1;
break;
case _LINUX_CAPABILITY_VERSION_2:
warn_deprecated_v2();
/*
* fall through - v3 is otherwise equivalent to v2.
*/
case _LINUX_CAPABILITY_VERSION_3:
*tocopy = _LINUX_CAPABILITY_U32S_3;
break;
default:
if (put_user((u32)_KERNEL_CAPABILITY_VERSION, &header->version))
return -EFAULT;
return -EINVAL;
}

return 0;
}

/*
* For sys_getproccap() and sys_setproccap(), any of the three
* capability set pointers may be NULL -- indicating that that set is
* uninteresting and/or not to be changed.
*/

/*
* Atomically modify the effective capabilities returning the original
* value. No permission check is performed here - it is assumed that the
* caller is permitted to set the desired effective capabilities.
*/
kernel_cap_t cap_set_effective(const kernel_cap_t pE_new)
{
kernel_cap_t pE_old;

spin_lock(&task_capability_lock);

pE_old = current->cap_effective;
current->cap_effective = pE_new;

spin_unlock(&task_capability_lock);

return pE_old;
}

EXPORT_SYMBOL(cap_set_effective);

/**
* sys_capget - get the capabilities of a given process.
* @header: pointer to struct that contains capability version and
@@ -71,27 +155,13 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
{
int ret = 0;
pid_t pid;
__u32 version;
struct task_struct *target;
unsigned tocopy;
kernel_cap_t pE, pI, pP;

if (get_user(version, &header->version))
return -EFAULT;

switch (version) {
case _LINUX_CAPABILITY_VERSION_1:
warn_legacy_capability_use();
tocopy = _LINUX_CAPABILITY_U32S_1;
break;
case _LINUX_CAPABILITY_VERSION_2:
tocopy = _LINUX_CAPABILITY_U32S_2;
break;
default:
if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
return -EFAULT;
return -EINVAL;
}
ret = cap_validate_magic(header, &tocopy);
if (ret != 0)
return ret;

if (get_user(pid, &header->pid))
return -EFAULT;
@@ -118,7 +188,7 @@ out:
spin_unlock(&task_capability_lock);

if (!ret) {
struct __user_cap_data_struct kdata[_LINUX_CAPABILITY_U32S];
struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];
unsigned i;

for (i = 0; i < tocopy; i++) {
@@ -128,7 +198,7 @@ out:
}

/*
* Note, in the case, tocopy < _LINUX_CAPABILITY_U32S,
* Note, in the case, tocopy < _KERNEL_CAPABILITY_U32S,
* we silently drop the upper capabilities here. This
* has the effect of making older libcap
* implementations implicitly drop upper capability
@@ -240,30 +310,16 @@ static inline int cap_set_all(kernel_cap_t *effective,
*/
asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
{
struct __user_cap_data_struct kdata[_LINUX_CAPABILITY_U32S];
struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];
unsigned i, tocopy;
kernel_cap_t inheritable, permitted, effective;
__u32 version;
struct task_struct *target;
int ret;
pid_t pid;

if (get_user(version, &header->version))
return -EFAULT;

switch (version) {
case _LINUX_CAPABILITY_VERSION_1:
warn_legacy_capability_use();
tocopy = _LINUX_CAPABILITY_U32S_1;
break;
case _LINUX_CAPABILITY_VERSION_2:
tocopy = _LINUX_CAPABILITY_U32S_2;
break;
default:
if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
return -EFAULT;
return -EINVAL;
}
ret = cap_validate_magic(header, &tocopy);
if (ret != 0)
return ret;

if (get_user(pid, &header->pid))
return -EFAULT;
@@ -281,7 +337,7 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
permitted.cap[i] = kdata[i].permitted;
inheritable.cap[i] = kdata[i].inheritable;
}
while (i < _LINUX_CAPABILITY_U32S) {
while (i < _KERNEL_CAPABILITY_U32S) {
effective.cap[i] = 0;
permitted.cap[i] = 0;
inheritable.cap[i] = 0;

@@ -2903,7 +2903,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys)
cg = tsk->cgroups;
parent = task_cgroup(tsk, subsys->subsys_id);

snprintf(nodename, MAX_CGROUP_TYPE_NAMELEN, "node_%d", tsk->pid);
snprintf(nodename, MAX_CGROUP_TYPE_NAMELEN, "%d", tsk->pid);

/* Pin the hierarchy */
atomic_inc(&parent->root->sb->s_active);

@@ -797,8 +797,10 @@ static int update_cpumask(struct cpuset *cs, char *buf)
retval = cpulist_parse(buf, trialcs.cpus_allowed);
if (retval < 0)
return retval;

if (!cpus_subset(trialcs.cpus_allowed, cpu_online_map))
return -EINVAL;
}
cpus_and(trialcs.cpus_allowed, trialcs.cpus_allowed, cpu_online_map);
retval = validate_change(cs, &trialcs);
if (retval < 0)
return retval;
@@ -932,9 +934,11 @@ static int update_nodemask(struct cpuset *cs, char *buf)
retval = nodelist_parse(buf, trialcs.mems_allowed);
if (retval < 0)
goto done;

if (!nodes_subset(trialcs.mems_allowed,
node_states[N_HIGH_MEMORY]))
return -EINVAL;
}
nodes_and(trialcs.mems_allowed, trialcs.mems_allowed,
node_states[N_HIGH_MEMORY]);
oldmem = cs->mems_allowed;
if (nodes_equal(oldmem, trialcs.mems_allowed)) {
retval = 0; /* Too easy - nothing to do */
@@ -1033,8 +1037,8 @@ int current_cpuset_is_being_rebound(void)

static int update_relax_domain_level(struct cpuset *cs, s64 val)
{
if ((int)val < 0)
val = -1;
if (val < -1 || val >= SD_LV_MAX)
return -EINVAL;

if (val != cs->relax_domain_level) {
cs->relax_domain_level = val;
@@ -1886,6 +1890,12 @@ static void common_cpu_mem_hotplug_unplug(void)
top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
scan_for_empty_cpusets(&top_cpuset);

/*
* Scheduler destroys domains on hotplug events.
* Rebuild them based on the current settings.
*/
rebuild_sched_domains();

cgroup_unlock();
}

@@ -126,6 +126,12 @@ static void __exit_signal(struct task_struct *tsk)

__unhash_process(tsk);

/*
* Do this under ->siglock, we can race with another thread
* doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
*/
flush_sigqueue(&tsk->pending);

tsk->signal = NULL;
tsk->sighand = NULL;
spin_unlock(&sighand->siglock);
@@ -133,7 +139,6 @@ static void __exit_signal(struct task_struct *tsk)

__cleanup_sighand(sighand);
clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
flush_sigqueue(&tsk->pending);
if (sig) {
flush_sigqueue(&sig->shared_pending);
taskstats_tgid_free(sig);

@@ -1096,21 +1096,64 @@ static void unqueue_me_pi(struct futex_q *q)
* private futexes.
*/
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
struct task_struct *newowner)
struct task_struct *newowner,
struct rw_semaphore *fshared)
{
u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
struct futex_pi_state *pi_state = q->pi_state;
struct task_struct *oldowner = pi_state->owner;
u32 uval, curval, newval;
int ret;
int ret, attempt = 0;

/* Owner died? */
if (!pi_state->owner)
newtid |= FUTEX_OWNER_DIED;

/*
* We are here either because we stole the rtmutex from the
* pending owner or we are the pending owner which failed to
* get the rtmutex. We have to replace the pending owner TID
* in the user space variable. This must be atomic as we have
* to preserve the owner died bit here.
*
* Note: We write the user space value _before_ changing the
* pi_state because we can fault here. Imagine swapped out
* pages or a fork, which was running right before we acquired
* mmap_sem, that marked all the anonymous memory readonly for
* cow.
*
* Modifying pi_state _before_ the user space value would
* leave the pi_state in an inconsistent state when we fault
* here, because we need to drop the hash bucket lock to
* handle the fault. This might be observed in the PID check
* in lookup_pi_state.
*/
retry:
if (get_futex_value_locked(&uval, uaddr))
goto handle_fault;

while (1) {
newval = (uval & FUTEX_OWNER_DIED) | newtid;

curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

if (curval == -EFAULT)
goto handle_fault;
if (curval == uval)
break;
uval = curval;
}

/*
* We fixed up user space. Now we need to fix the pi_state
* itself.
*/
if (pi_state->owner != NULL) {
spin_lock_irq(&pi_state->owner->pi_lock);
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
spin_unlock_irq(&pi_state->owner->pi_lock);
} else
newtid |= FUTEX_OWNER_DIED;
}

pi_state->owner = newowner;

@@ -1118,26 +1161,35 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
WARN_ON(!list_empty(&pi_state->list));
list_add(&pi_state->list, &newowner->pi_state_list);
spin_unlock_irq(&newowner->pi_lock);
return 0;

/*
* We own it, so we have to replace the pending owner
* TID. This must be atomic as we have preserve the
* owner died bit here.
* To handle the page fault we need to drop the hash bucket
* lock here. That gives the other task (either the pending
* owner itself or the task which stole the rtmutex) the
* chance to try the fixup of the pi_state. So once we are
* back from handling the fault we need to check the pi_state
* after reacquiring the hash bucket lock and before trying to
* do another fixup. When the fixup has been done already we
* simply return.
*/
ret = get_futex_value_locked(&uval, uaddr);
handle_fault:
spin_unlock(q->lock_ptr);

while (!ret) {
newval = (uval & FUTEX_OWNER_DIED) | newtid;
ret = futex_handle_fault((unsigned long)uaddr, fshared, attempt++);

curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
spin_lock(q->lock_ptr);

if (curval == -EFAULT)
ret = -EFAULT;
if (curval == uval)
break;
uval = curval;
}
return ret;
/*
* Check if someone else fixed it for us:
*/
if (pi_state->owner != oldowner)
return 0;

if (ret)
return ret;

goto retry;
}

/*
@@ -1507,7 +1559,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
* that case:
*/
if (q.pi_state->owner != curr)
ret = fixup_pi_state_owner(uaddr, &q, curr);
ret = fixup_pi_state_owner(uaddr, &q, curr, fshared);
} else {
/*
* Catch the rare case, where the lock was released
@@ -1539,7 +1591,8 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
int res;

owner = rt_mutex_owner(&q.pi_state->pi_mutex);
res = fixup_pi_state_owner(uaddr, &q, owner);
res = fixup_pi_state_owner(uaddr, &q, owner,
fshared);

/* propagate -EFAULT, if the fixup failed */
if (res)

@@ -1003,10 +1003,18 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
*/
raise = timer->state == HRTIMER_STATE_PENDING;

/*
* We use preempt_disable to prevent this task from migrating after
* setting up the softirq and raising it. Otherwise, if me migrate
* we will raise the softirq on the wrong CPU.
*/
preempt_disable();

unlock_hrtimer_base(timer, &flags);

if (raise)
hrtimer_raise_softirq();
preempt_enable();

return ret;
}

@@ -52,6 +52,7 @@
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/unaligned.h>

static int kgdb_break_asap;

@@ -227,8 +228,6 @@ void __weak kgdb_disable_hw_debug(struct pt_regs *regs)
* GDB remote protocol parser:
*/

static const char hexchars[] = "0123456789abcdef";

static int hex(char ch)
{
if ((ch >= 'a') && (ch <= 'f'))
@@ -316,8 +315,8 @@ static void put_packet(char *buffer)
}

kgdb_io_ops->write_char('#');
kgdb_io_ops->write_char(hexchars[checksum >> 4]);
kgdb_io_ops->write_char(hexchars[checksum & 0xf]);
kgdb_io_ops->write_char(hex_asc_hi(checksum));
kgdb_io_ops->write_char(hex_asc_lo(checksum));
if (kgdb_io_ops->flush)
kgdb_io_ops->flush();

@@ -478,8 +477,8 @@ static void error_packet(char *pkt, int error)
{
error = -error;
pkt[0] = 'E';
pkt[1] = hexchars[(error / 10)];
pkt[2] = hexchars[(error % 10)];
pkt[1] = hex_asc[(error / 10)];
pkt[2] = hex_asc[(error % 10)];
pkt[3] = '\0';
}

@@ -510,10 +509,7 @@ static void int_to_threadref(unsigned char *id, int value)
scan = (unsigned char *)id;
while (i--)
*scan++ = 0;
*scan++ = (value >> 24) & 0xff;
*scan++ = (value >> 16) & 0xff;
*scan++ = (value >> 8) & 0xff;
*scan++ = (value & 0xff);
put_unaligned_be32(value, scan);
}

static struct task_struct *getthread(struct pt_regs *regs, int tid)
@@ -1503,7 +1499,8 @@ int kgdb_nmicallback(int cpu, void *regs)
return 1;
}

void kgdb_console_write(struct console *co, const char *s, unsigned count)
static void kgdb_console_write(struct console *co, const char *s,
unsigned count)
{
unsigned long flags;

@@ -699,8 +699,9 @@ static int __register_kprobes(struct kprobe **kps, int num,
return -EINVAL;
for (i = 0; i < num; i++) {
ret = __register_kprobe(kps[i], called_from);
if (ret < 0 && i > 0) {
unregister_kprobes(kps, i);
if (ret < 0) {
if (i > 0)
unregister_kprobes(kps, i);
break;
}
}
@@ -776,8 +777,9 @@ static int __register_jprobes(struct jprobe **jps, int num,
jp->kp.break_handler = longjmp_break_handler;
ret = __register_kprobe(&jp->kp, called_from);
}
if (ret < 0 && i > 0) {
unregister_jprobes(jps, i);
if (ret < 0) {
if (i > 0)
unregister_jprobes(jps, i);
break;
}
}
@@ -920,8 +922,9 @@ static int __register_kretprobes(struct kretprobe **rps, int num,
return -EINVAL;
for (i = 0; i < num; i++) {
ret = __register_kretprobe(rps[i], called_from);
if (ret < 0 && i > 0) {
unregister_kretprobes(rps, i);
if (ret < 0) {
if (i > 0)
unregister_kretprobes(rps, i);
break;
}
}

@@ -1337,7 +1337,19 @@ out_unreg:
kobject_put(&mod->mkobj.kobj);
return err;
}
#endif

static void mod_sysfs_fini(struct module *mod)
{
kobject_put(&mod->mkobj.kobj);
}

#else /* CONFIG_SYSFS */

static void mod_sysfs_fini(struct module *mod)
{
}

#endif /* CONFIG_SYSFS */

static void mod_kobject_remove(struct module *mod)
{
@@ -1345,7 +1357,7 @@ static void mod_kobject_remove(struct module *mod)
module_param_sysfs_remove(mod);
kobject_put(mod->mkobj.drivers_dir);
kobject_put(mod->holders_dir);
kobject_put(&mod->mkobj.kobj);
mod_sysfs_fini(mod);
}

/*
@@ -1780,7 +1792,7 @@ static struct module *load_module(void __user *umod,

/* Sanity checks against insmoding binaries or wrong arch,
weird elf version */
if (memcmp(hdr->e_ident, ELFMAG, 4) != 0
if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0
|| hdr->e_type != ET_REL
|| !elf_check_arch(hdr)
|| hdr->e_shentsize != sizeof(*sechdrs)) {

@@ -89,8 +89,22 @@ static void force_quiescent_state(struct rcu_data *rdp,
/*
* Don't send IPI to itself. With irqs disabled,
* rdp->cpu is the current cpu.
*
* cpu_online_map is updated by the _cpu_down()
* using stop_machine_run(). Since we're in irqs disabled
* section, stop_machine_run() is not exectuting, hence
* the cpu_online_map is stable.
*
* However, a cpu might have been offlined _just_ before
* we disabled irqs while entering here.
* And rcu subsystem might not yet have handled the CPU_DEAD
* notification, leading to the offlined cpu's bit
* being set in the rcp->cpumask.
*
* Hence cpumask = (rcp->cpumask & cpu_online_map) to prevent
* sending smp_reschedule() to an offlined CPU.
*/
cpumask = rcp->cpumask;
cpus_and(cpumask, rcp->cpumask, cpu_online_map);
cpu_clear(rdp->cpu, cpumask);
for_each_cpu_mask(cpu, cpumask)
smp_send_reschedule(cpu);

@@ -217,8 +217,6 @@ long rcu_batches_completed(void)
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);

void __rcu_read_lock(void)
{
int idx;

@@ -1191,7 +1191,7 @@ static ssize_t relay_file_splice_read(struct file *in,
ret = 0;
spliced = 0;

while (len) {
while (len && !spliced) {
ret = subbuf_splice_actor(in, ppos, pipe, len, flags, &nonpad_ret);
if (ret < 0)
break;

kernel/sched.c
@@ -136,7 +136,7 @@ static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
|
||||
|
||||
static inline int rt_policy(int policy)
|
||||
{
|
||||
if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR))
|
||||
if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
@@ -312,12 +312,15 @@ static DEFINE_SPINLOCK(task_group_lock);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* A weight of 0, 1 or ULONG_MAX can cause arithmetics problems.
|
||||
* A weight of 0 or 1 can cause arithmetics problems.
|
||||
* A weight of a cfs_rq is the sum of weights of which entities
|
||||
* are queued on this cfs_rq, so a weight of a entity should not be
|
||||
* too large, so as the shares value of a task group.
|
||||
* (The default weight is 1024 - so there's no practical
|
||||
* limitation from this.)
|
||||
*/
|
||||
#define MIN_SHARES 2
|
||||
#define MAX_SHARES (ULONG_MAX - 1)
|
||||
#define MAX_SHARES (1UL << 18)
|
||||
|
||||
static int init_task_group_load = INIT_TASK_GROUP_LOAD;
|
||||
#endif
|
||||
@@ -398,43 +401,6 @@ struct cfs_rq {
|
||||
*/
|
||||
struct list_head leaf_cfs_rq_list;
|
||||
struct task_group *tg; /* group that "owns" this runqueue */
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
unsigned long task_weight;
|
||||
unsigned long shares;
|
||||
/*
|
||||
* We need space to build a sched_domain wide view of the full task
|
||||
* group tree, in order to avoid depending on dynamic memory allocation
|
||||
* during the load balancing we place this in the per cpu task group
|
||||
* hierarchy. This limits the load balancing to one instance per cpu,
|
||||
* but more should not be needed anyway.
|
||||
*/
|
||||
struct aggregate_struct {
|
||||
/*
|
||||
* load = weight(cpus) * f(tg)
|
||||
*
|
||||
* Where f(tg) is the recursive weight fraction assigned to
|
||||
* this group.
|
||||
*/
|
||||
unsigned long load;
|
||||
|
||||
/*
|
||||
* part of the group weight distributed to this span.
|
||||
*/
|
||||
unsigned long shares;
|
||||
|
||||
/*
|
||||
* The sum of all runqueue weights within this span.
|
||||
*/
|
||||
unsigned long rq_weight;
|
||||
|
||||
/*
|
||||
* Weight contributed by tasks; this is the part we can
|
||||
* influence by moving tasks around.
|
||||
*/
|
||||
unsigned long task_weight;
|
||||
} aggregate;
|
||||
#endif
|
||||
#endif
|
||||
};
|
||||
|
||||
@@ -1161,6 +1127,7 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
|
||||
return HRTIMER_NORESTART;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
static void hotplug_hrtick_disable(int cpu)
|
||||
{
|
||||
struct rq *rq = cpu_rq(cpu);
|
||||
@@ -1216,6 +1183,7 @@ static void init_hrtick(void)
|
||||
{
|
||||
hotcpu_notifier(hotplug_hrtick, 0);
|
||||
}
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
static void init_rq_hrtick(struct rq *rq)
|
||||
{
|
||||
@@ -1368,17 +1336,19 @@ static void __resched_task(struct task_struct *p, int tif_bit)
|
||||
*/
|
||||
#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
|
||||
|
||||
/*
|
||||
* delta *= weight / lw
|
||||
*/
|
||||
static unsigned long
|
||||
calc_delta_mine(unsigned long delta_exec, unsigned long weight,
|
||||
struct load_weight *lw)
|
||||
{
|
||||
u64 tmp;
|
||||
|
||||
if (!lw->inv_weight)
|
||||
lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)/(lw->weight+1);
|
||||
if (!lw->inv_weight) {
|
||||
if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST))
|
||||
lw->inv_weight = 1;
|
||||
else
|
||||
lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)
|
||||
/ (lw->weight+1);
|
||||
}
|
||||
|
||||
tmp = (u64)delta_exec * weight;
|
||||
/*
|
||||
@@ -1393,6 +1363,12 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight,
|
||||
return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
|
||||
}
|
||||
|
||||
static inline unsigned long
|
||||
calc_delta_fair(unsigned long delta_exec, struct load_weight *lw)
|
||||
{
|
||||
return calc_delta_mine(delta_exec, NICE_0_LOAD, lw);
|
||||
}
|
||||
|
||||
static inline void update_load_add(struct load_weight *lw, unsigned long inc)
|
||||
{
|
||||
lw->weight += inc;
|
||||
@@ -1505,326 +1481,6 @@ static unsigned long source_load(int cpu, int type);
|
||||
static unsigned long target_load(int cpu, int type);
|
||||
static unsigned long cpu_avg_load_per_task(int cpu);
|
||||
static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
|
||||
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
|
||||
/*
|
||||
* Group load balancing.
|
||||
*
|
||||
* We calculate a few balance domain wide aggregate numbers; load and weight.
|
||||
* Given the pictures below, and assuming each item has equal weight:
|
||||
*
|
||||
* root 1 - thread
|
||||
* / | \ A - group
|
||||
* A 1 B
|
||||
* /|\ / \
|
||||
* C 2 D 3 4
|
||||
* | |
|
||||
* 5 6
|
||||
*
|
||||
* load:
|
||||
* A and B get 1/3-rd of the total load. C and D get 1/3-rd of A's 1/3-rd,
|
||||
* which equals 1/9-th of the total load.
|
||||
*
|
||||
* shares:
|
||||
* The weight of this group on the selected cpus.
|
||||
*
|
||||
* rq_weight:
|
||||
* Direct sum of all the cpu's their rq weight, e.g. A would get 3 while
|
||||
* B would get 2.
|
||||
*
|
||||
* task_weight:
|
||||
* Part of the rq_weight contributed by tasks; all groups except B would
|
||||
* get 1, B gets 2.
|
||||
*/
|
||||
|
||||
static inline struct aggregate_struct *
|
||||
aggregate(struct task_group *tg, struct sched_domain *sd)
|
||||
{
|
||||
return &tg->cfs_rq[sd->first_cpu]->aggregate;
|
||||
}
|
||||
|
||||
typedef void (*aggregate_func)(struct task_group *, struct sched_domain *);
|
||||
|
||||
/*
|
||||
* Iterate the full tree, calling @down when first entering a node and @up when
|
||||
* leaving it for the final time.
|
||||
*/
|
||||
static
|
||||
void aggregate_walk_tree(aggregate_func down, aggregate_func up,
|
||||
struct sched_domain *sd)
|
||||
{
|
||||
struct task_group *parent, *child;
|
||||
|
||||
rcu_read_lock();
|
||||
parent = &root_task_group;
|
||||
down:
|
||||
(*down)(parent, sd);
|
||||
list_for_each_entry_rcu(child, &parent->children, siblings) {
|
||||
parent = child;
|
||||
goto down;
|
||||
|
||||
up:
|
||||
continue;
|
||||
}
|
||||
(*up)(parent, sd);
|
||||
|
||||
child = parent;
|
||||
parent = parent->parent;
|
||||
if (parent)
|
||||
goto up;
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
/*
|
||||
* Calculate the aggregate runqueue weight.
|
||||
*/
|
||||
static
|
||||
void aggregate_group_weight(struct task_group *tg, struct sched_domain *sd)
|
||||
{
|
||||
unsigned long rq_weight = 0;
|
||||
unsigned long task_weight = 0;
|
||||
int i;
|
||||
|
||||
for_each_cpu_mask(i, sd->span) {
|
||||
rq_weight += tg->cfs_rq[i]->load.weight;
|
||||
task_weight += tg->cfs_rq[i]->task_weight;
|
||||
}
|
||||
|
||||
aggregate(tg, sd)->rq_weight = rq_weight;
|
||||
aggregate(tg, sd)->task_weight = task_weight;
|
||||
}
|
||||
|
||||
/*
|
||||
* Compute the weight of this group on the given cpus.
|
||||
*/
|
||||
static
|
||||
void aggregate_group_shares(struct task_group *tg, struct sched_domain *sd)
|
||||
{
|
||||
unsigned long shares = 0;
|
||||
int i;
|
||||
|
||||
for_each_cpu_mask(i, sd->span)
|
||||
shares += tg->cfs_rq[i]->shares;
|
||||
|
||||
if ((!shares && aggregate(tg, sd)->rq_weight) || shares > tg->shares)
|
||||
shares = tg->shares;
|
||||
|
||||
aggregate(tg, sd)->shares = shares;
|
||||
}
|
||||
|
||||
/*
|
||||
* Compute the load fraction assigned to this group, relies on the aggregate
|
||||
* weight and this group's parent's load, i.e. top-down.
|
||||
*/
|
||||
static
|
||||
void aggregate_group_load(struct task_group *tg, struct sched_domain *sd)
|
||||
{
|
||||
unsigned long load;
|
||||
|
||||
if (!tg->parent) {
|
||||
int i;
|
||||
|
||||
load = 0;
|
||||
for_each_cpu_mask(i, sd->span)
|
||||
load += cpu_rq(i)->load.weight;
|
||||
|
||||
} else {
|
||||
load = aggregate(tg->parent, sd)->load;
|
||||
|
||||
/*
|
||||
* shares is our weight in the parent's rq so
|
||||
* shares/parent->rq_weight gives our fraction of the load
|
||||
*/
|
||||
load *= aggregate(tg, sd)->shares;
|
||||
load /= aggregate(tg->parent, sd)->rq_weight + 1;
|
||||
}
|
||||
|
||||
aggregate(tg, sd)->load = load;
|
||||
}
|
||||
|
||||
static void __set_se_shares(struct sched_entity *se, unsigned long shares);
|
||||
|
||||
/*
|
||||
* Calculate and set the cpu's group shares.
|
||||
*/
|
||||
static void
|
||||
__update_group_shares_cpu(struct task_group *tg, struct sched_domain *sd,
|
||||
int tcpu)
|
||||
{
|
||||
int boost = 0;
|
||||
unsigned long shares;
|
||||
unsigned long rq_weight;
|
||||
|
||||
if (!tg->se[tcpu])
|
||||
return;
|
||||
|
||||
rq_weight = tg->cfs_rq[tcpu]->load.weight;
|
||||
|
||||
/*
|
||||
* If there are currently no tasks on the cpu pretend there is one of
|
||||
* average load so that when a new task gets to run here it will not
|
||||
* get delayed by group starvation.
|
||||
*/
|
||||
if (!rq_weight) {
|
||||
boost = 1;
|
||||
rq_weight = NICE_0_LOAD;
|
||||
}
|
||||
|
||||
/*
|
||||
* \Sum shares * rq_weight
|
||||
* shares = -----------------------
|
||||
* \Sum rq_weight
|
||||
*
|
||||
*/
|
||||
shares = aggregate(tg, sd)->shares * rq_weight;
|
||||
shares /= aggregate(tg, sd)->rq_weight + 1;
|
||||
|
||||
/*
|
||||
* record the actual number of shares, not the boosted amount.
|
||||
*/
|
||||
tg->cfs_rq[tcpu]->shares = boost ? 0 : shares;
|
||||
|
||||
if (shares < MIN_SHARES)
|
||||
shares = MIN_SHARES;
|
||||
else if (shares > MAX_SHARES)
|
||||
shares = MAX_SHARES;
|
||||
|
||||
__set_se_shares(tg->se[tcpu], shares);
|
||||
}
|
||||
|
||||
/*
|
||||
* Re-adjust the weights on the cpu the task came from and on the cpu the
|
||||
* task went to.
|
||||
*/
|
||||
static void
|
||||
__move_group_shares(struct task_group *tg, struct sched_domain *sd,
|
||||
int scpu, int dcpu)
|
||||
{
|
||||
unsigned long shares;
|
||||
|
||||
shares = tg->cfs_rq[scpu]->shares + tg->cfs_rq[dcpu]->shares;
|
||||
|
||||
__update_group_shares_cpu(tg, sd, scpu);
|
||||
__update_group_shares_cpu(tg, sd, dcpu);
|
||||
|
||||
/*
|
||||
* ensure we never loose shares due to rounding errors in the
|
||||
* above redistribution.
|
||||
*/
|
||||
shares -= tg->cfs_rq[scpu]->shares + tg->cfs_rq[dcpu]->shares;
|
||||
if (shares)
|
||||
tg->cfs_rq[dcpu]->shares += shares;
|
||||
}
|
||||
|
||||
/*
|
||||
* Because changing a group's shares changes the weight of the super-group
|
||||
* we need to walk up the tree and change all shares until we hit the root.
|
||||
*/
|
||||
static void
|
||||
move_group_shares(struct task_group *tg, struct sched_domain *sd,
|
||||
int scpu, int dcpu)
|
||||
{
|
||||
while (tg) {
|
||||
__move_group_shares(tg, sd, scpu, dcpu);
|
||||
tg = tg->parent;
|
||||
}
|
||||
}
|
||||
|
||||
static
|
||||
void aggregate_group_set_shares(struct task_group *tg, struct sched_domain *sd)
|
||||
{
|
||||
unsigned long shares = aggregate(tg, sd)->shares;
|
||||
int i;
|
||||
|
||||
for_each_cpu_mask(i, sd->span) {
|
||||
struct rq *rq = cpu_rq(i);
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&rq->lock, flags);
|
||||
__update_group_shares_cpu(tg, sd, i);
|
||||
spin_unlock_irqrestore(&rq->lock, flags);
|
||||
}
|
||||
|
||||
aggregate_group_shares(tg, sd);
|
||||
|
||||
/*
|
||||
* ensure we never loose shares due to rounding errors in the
|
||||
* above redistribution.
|
||||
*/
|
||||
shares -= aggregate(tg, sd)->shares;
|
||||
if (shares) {
|
||||
tg->cfs_rq[sd->first_cpu]->shares += shares;
|
||||
aggregate(tg, sd)->shares += shares;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Calculate the accumulative weight and recursive load of each task group
|
||||
* while walking down the tree.
|
||||
*/
|
||||
static
|
||||
void aggregate_get_down(struct task_group *tg, struct sched_domain *sd)
|
||||
{
|
||||
aggregate_group_weight(tg, sd);
|
||||
aggregate_group_shares(tg, sd);
|
||||
aggregate_group_load(tg, sd);
|
||||
}
|
||||
|
||||
/*
|
||||
* Rebalance the cpu shares while walking back up the tree.
|
||||
*/
|
||||
static
|
||||
void aggregate_get_up(struct task_group *tg, struct sched_domain *sd)
|
||||
{
|
||||
aggregate_group_set_shares(tg, sd);
|
||||
}
|
||||
|
||||
static DEFINE_PER_CPU(spinlock_t, aggregate_lock);
|
||||
|
||||
static void __init init_aggregate(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
for_each_possible_cpu(i)
|
||||
spin_lock_init(&per_cpu(aggregate_lock, i));
|
||||
}
|
||||
|
||||
static int get_aggregate(struct sched_domain *sd)
|
||||
{
|
||||
if (!spin_trylock(&per_cpu(aggregate_lock, sd->first_cpu)))
|
||||
return 0;
|
||||
|
||||
aggregate_walk_tree(aggregate_get_down, aggregate_get_up, sd);
|
||||
return 1;
|
||||
}
|
||||
|
||||
static void put_aggregate(struct sched_domain *sd)
|
||||
{
|
||||
spin_unlock(&per_cpu(aggregate_lock, sd->first_cpu));
|
||||
}
|
||||
|
||||
static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
|
||||
{
|
||||
cfs_rq->shares = shares;
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
static inline void init_aggregate(void)
|
||||
{
|
||||
}
|
||||
|
||||
static inline int get_aggregate(struct sched_domain *sd)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void put_aggregate(struct sched_domain *sd)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
#else /* CONFIG_SMP */
|
||||
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
@@ -1845,14 +1501,26 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
|
||||
|
||||
#define sched_class_highest (&rt_sched_class)
|
||||
|
||||
static void inc_nr_running(struct rq *rq)
|
||||
static inline void inc_load(struct rq *rq, const struct task_struct *p)
|
||||
{
|
||||
rq->nr_running++;
|
||||
update_load_add(&rq->load, p->se.load.weight);
|
||||
}
|
||||
|
||||
static void dec_nr_running(struct rq *rq)
|
||||
static inline void dec_load(struct rq *rq, const struct task_struct *p)
|
||||
{
|
||||
update_load_sub(&rq->load, p->se.load.weight);
|
||||
}
|
||||
|
||||
static void inc_nr_running(struct task_struct *p, struct rq *rq)
|
||||
{
|
||||
rq->nr_running++;
|
||||
inc_load(rq, p);
|
||||
}
|
||||
|
||||
static void dec_nr_running(struct task_struct *p, struct rq *rq)
|
||||
{
|
||||
rq->nr_running--;
|
||||
dec_load(rq, p);
|
||||
}
|
||||
|
||||
static void set_load_weight(struct task_struct *p)
|
||||
@@ -1944,7 +1612,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
|
||||
rq->nr_uninterruptible--;
|
||||
|
||||
enqueue_task(rq, p, wakeup);
|
||||
inc_nr_running(rq);
|
||||
inc_nr_running(p, rq);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1956,7 +1624,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
|
||||
rq->nr_uninterruptible++;
|
||||
|
||||
dequeue_task(rq, p, sleep);
|
||||
dec_nr_running(rq);
|
||||
dec_nr_running(p, rq);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -2609,7 +2277,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
|
||||
* management (if any):
|
||||
*/
|
||||
p->sched_class->task_new(rq, p);
|
||||
inc_nr_running(rq);
|
||||
inc_nr_running(p, rq);
|
||||
}
|
||||
check_preempt_curr(rq, p);
|
||||
#ifdef CONFIG_SMP
|
||||
@@ -3600,12 +3268,9 @@ static int load_balance(int this_cpu, struct rq *this_rq,
|
||||
unsigned long imbalance;
|
||||
struct rq *busiest;
|
||||
unsigned long flags;
|
||||
int unlock_aggregate;
|
||||
|
||||
cpus_setall(*cpus);
|
||||
|
||||
unlock_aggregate = get_aggregate(sd);
|
||||
|
||||
/*
|
||||
* When power savings policy is enabled for the parent domain, idle
|
||||
* sibling can pick up load irrespective of busy siblings. In this case,
|
||||
@@ -3721,9 +3386,8 @@ redo:
|
||||
|
||||
if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
|
||||
!test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
|
||||
ld_moved = -1;
|
||||
|
||||
goto out;
|
||||
return -1;
|
||||
return ld_moved;
|
||||
|
||||
out_balanced:
|
||||
schedstat_inc(sd, lb_balanced[idle]);
|
||||
@@ -3738,13 +3402,8 @@ out_one_pinned:
|
||||
|
||||
if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
|
||||
!test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
|
||||
ld_moved = -1;
|
||||
else
|
||||
ld_moved = 0;
|
||||
out:
|
||||
if (unlock_aggregate)
|
||||
put_aggregate(sd);
|
||||
return ld_moved;
|
||||
return -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -4430,7 +4089,7 @@ static inline void schedule_debug(struct task_struct *prev)
|
||||
* schedule() atomically, we ignore that path for now.
|
||||
* Otherwise, whine if we are scheduling when we should not be.
|
||||
*/
|
||||
if (unlikely(in_atomic_preempt_off()) && unlikely(!prev->exit_state))
|
||||
if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
|
||||
__schedule_bug(prev);
|
||||
|
||||
profile_hit(SCHED_PROFILING, __builtin_return_address(0));
|
||||
@@ -4510,12 +4169,10 @@ need_resched_nonpreemptible:
|
||||
clear_tsk_need_resched(prev);
|
||||
|
||||
if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
|
||||
if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
|
||||
signal_pending(prev))) {
|
||||
if (unlikely(signal_pending_state(prev->state, prev)))
|
||||
prev->state = TASK_RUNNING;
|
||||
} else {
|
||||
else
|
||||
deactivate_task(rq, prev, 1);
|
||||
}
|
||||
switch_count = &prev->nvcsw;
|
||||
}
|
||||
|
||||
@@ -4741,22 +4398,20 @@ do_wait_for_common(struct completion *x, long timeout, int state)
|
||||
signal_pending(current)) ||
|
||||
(state == TASK_KILLABLE &&
|
||||
fatal_signal_pending(current))) {
|
||||
__remove_wait_queue(&x->wait, &wait);
|
||||
return -ERESTARTSYS;
|
||||
timeout = -ERESTARTSYS;
|
||||
break;
|
||||
}
|
||||
__set_current_state(state);
|
||||
spin_unlock_irq(&x->wait.lock);
|
||||
timeout = schedule_timeout(timeout);
|
||||
spin_lock_irq(&x->wait.lock);
|
||||
if (!timeout) {
|
||||
__remove_wait_queue(&x->wait, &wait);
|
||||
return timeout;
|
||||
}
|
||||
} while (!x->done);
|
||||
} while (!x->done && timeout);
|
||||
__remove_wait_queue(&x->wait, &wait);
|
||||
if (!x->done)
|
||||
return timeout;
|
||||
}
|
||||
x->done--;
|
||||
return timeout;
|
||||
return timeout ?: 1;
|
||||
}
|
||||
|
||||
static long __sched
|
||||
@@ -4931,8 +4586,10 @@ void set_user_nice(struct task_struct *p, long nice)
|
||||
goto out_unlock;
|
||||
}
|
||||
on_rq = p->se.on_rq;
|
||||
if (on_rq)
|
||||
if (on_rq) {
|
||||
dequeue_task(rq, p, 0);
|
||||
dec_load(rq, p);
|
||||
}
|
||||
|
||||
p->static_prio = NICE_TO_PRIO(nice);
|
||||
set_load_weight(p);
|
||||
@@ -4942,6 +4599,7 @@ void set_user_nice(struct task_struct *p, long nice)
|
||||
|
||||
if (on_rq) {
|
||||
enqueue_task(rq, p, 0);
|
||||
inc_load(rq, p);
|
||||
/*
|
||||
* If the task increased its priority or is running and
|
||||
* lowered its priority, then reschedule its CPU:
|
||||
@@ -6229,6 +5887,7 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
|
||||
next = pick_next_task(rq, rq->curr);
|
||||
if (!next)
|
||||
break;
|
||||
next->sched_class->put_prev_task(rq, next);
|
||||
migrate_dead(dead_cpu, next);
|
||||
|
||||
}
|
||||
@@ -7219,7 +6878,12 @@ static int default_relax_domain_level = -1;
|
||||
|
||||
static int __init setup_relax_domain_level(char *str)
|
||||
{
|
||||
default_relax_domain_level = simple_strtoul(str, NULL, 0);
|
||||
unsigned long val;
|
||||
|
||||
val = simple_strtoul(str, NULL, 0);
|
||||
if (val < SD_LV_MAX)
|
||||
default_relax_domain_level = val;
|
||||
|
||||
return 1;
|
||||
}
|
||||
__setup("relax_domain_level=", setup_relax_domain_level);
|
||||
@@ -7316,7 +6980,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
|
||||
SD_INIT(sd, ALLNODES);
|
||||
set_domain_attribute(sd, attr);
|
||||
sd->span = *cpu_map;
|
||||
sd->first_cpu = first_cpu(sd->span);
|
||||
cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask);
|
||||
p = sd;
|
||||
sd_allnodes = 1;
|
||||
@@ -7327,7 +6990,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
|
||||
SD_INIT(sd, NODE);
|
||||
set_domain_attribute(sd, attr);
|
||||
sched_domain_node_span(cpu_to_node(i), &sd->span);
|
||||
sd->first_cpu = first_cpu(sd->span);
|
||||
sd->parent = p;
|
||||
if (p)
|
||||
p->child = sd;
|
||||
@@ -7339,7 +7001,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
|
||||
SD_INIT(sd, CPU);
|
||||
set_domain_attribute(sd, attr);
|
||||
sd->span = *nodemask;
|
||||
sd->first_cpu = first_cpu(sd->span);
|
||||
sd->parent = p;
|
||||
if (p)
|
||||
p->child = sd;
|
||||
@@ -7351,7 +7012,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
|
||||
SD_INIT(sd, MC);
|
||||
set_domain_attribute(sd, attr);
|
||||
sd->span = cpu_coregroup_map(i);
|
||||
sd->first_cpu = first_cpu(sd->span);
|
||||
cpus_and(sd->span, sd->span, *cpu_map);
|
||||
sd->parent = p;
|
||||
p->child = sd;
|
||||
@@ -7364,7 +7024,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
|
||||
SD_INIT(sd, SIBLING);
|
||||
set_domain_attribute(sd, attr);
|
||||
sd->span = per_cpu(cpu_sibling_map, i);
|
||||
sd->first_cpu = first_cpu(sd->span);
|
||||
cpus_and(sd->span, sd->span, *cpu_map);
|
||||
sd->parent = p;
|
||||
p->child = sd;
|
||||
@@ -7568,8 +7227,8 @@ static int build_sched_domains(const cpumask_t *cpu_map)
|
||||
|
||||
static cpumask_t *doms_cur; /* current sched domains */
|
||||
static int ndoms_cur; /* number of sched domains in 'doms_cur' */
|
||||
static struct sched_domain_attr *dattr_cur; /* attribues of custom domains
|
||||
in 'doms_cur' */
|
||||
static struct sched_domain_attr *dattr_cur;
|
||||
/* attribues of custom domains in 'doms_cur' */
|
||||
|
||||
/*
|
||||
* Special case: If a kmalloc of a doms_cur partition (array of
|
||||
@@ -7582,6 +7241,18 @@ void __attribute__((weak)) arch_update_cpu_topology(void)
|
||||
{
|
||||
}
|
||||
|
||||
/*
|
||||
* Free current domain masks.
|
||||
* Called after all cpus are attached to NULL domain.
|
||||
*/
|
||||
static void free_sched_domains(void)
|
||||
{
|
||||
ndoms_cur = 0;
|
||||
if (doms_cur != &fallback_doms)
|
||||
kfree(doms_cur);
|
||||
doms_cur = &fallback_doms;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set up scheduler domains and groups. Callers must hold the hotplug lock.
|
||||
* For now this just excludes isolated cpus, but could be used to
|
||||
@@ -7729,6 +7400,7 @@ int arch_reinit_sched_domains(void)
|
||||
get_online_cpus();
|
||||
mutex_lock(&sched_domains_mutex);
|
||||
detach_destroy_domains(&cpu_online_map);
|
||||
free_sched_domains();
|
||||
err = arch_init_sched_domains(&cpu_online_map);
|
||||
mutex_unlock(&sched_domains_mutex);
|
||||
put_online_cpus();
|
||||
@@ -7814,6 +7486,7 @@ static int update_sched_domains(struct notifier_block *nfb,
|
||||
case CPU_DOWN_PREPARE:
|
||||
case CPU_DOWN_PREPARE_FROZEN:
|
||||
detach_destroy_domains(&cpu_online_map);
|
||||
free_sched_domains();
|
||||
return NOTIFY_OK;
|
||||
|
||||
case CPU_UP_CANCELED:
|
||||
@@ -7832,8 +7505,16 @@ static int update_sched_domains(struct notifier_block *nfb,
|
||||
return NOTIFY_DONE;
|
||||
}
|
||||
|
||||
#ifndef CONFIG_CPUSETS
|
||||
/*
|
||||
* Create default domain partitioning if cpusets are disabled.
|
||||
* Otherwise we let cpusets rebuild the domains based on the
|
||||
* current setup.
|
||||
*/
|
||||
|
||||
/* The hotplug lock is already held by cpu_up/cpu_down */
|
||||
arch_init_sched_domains(&cpu_online_map);
|
||||
#endif
|
||||
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
@@ -7973,7 +7654,6 @@ static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
|
||||
else
|
||||
rt_se->rt_rq = parent->my_q;
|
||||
|
||||
rt_se->rt_rq = &rq->rt;
|
||||
rt_se->my_q = rt_rq;
|
||||
rt_se->parent = parent;
|
||||
INIT_LIST_HEAD(&rt_se->run_list);
|
||||
@@ -8034,7 +7714,6 @@ void __init sched_init(void)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
init_aggregate();
|
||||
init_defrootdomain();
|
||||
#endif
|
||||
|
||||
@@ -8599,11 +8278,14 @@ void sched_move_task(struct task_struct *tsk)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
static void __set_se_shares(struct sched_entity *se, unsigned long shares)
|
||||
static void set_se_shares(struct sched_entity *se, unsigned long shares)
|
||||
{
|
||||
struct cfs_rq *cfs_rq = se->cfs_rq;
|
||||
struct rq *rq = cfs_rq->rq;
|
||||
int on_rq;
|
||||
|
||||
spin_lock_irq(&rq->lock);
|
||||
|
||||
on_rq = se->on_rq;
|
||||
if (on_rq)
|
||||
dequeue_entity(cfs_rq, se, 0);
|
||||
@@ -8613,17 +8295,8 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares)
|
||||
|
||||
if (on_rq)
|
||||
enqueue_entity(cfs_rq, se, 0);
|
||||
}
|
||||
|
||||
static void set_se_shares(struct sched_entity *se, unsigned long shares)
|
||||
{
|
||||
struct cfs_rq *cfs_rq = se->cfs_rq;
|
||||
struct rq *rq = cfs_rq->rq;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&rq->lock, flags);
|
||||
__set_se_shares(se, shares);
|
||||
spin_unlock_irqrestore(&rq->lock, flags);
|
||||
spin_unlock_irq(&rq->lock);
|
||||
}
|
||||
|
||||
static DEFINE_MUTEX(shares_mutex);
|
||||
@@ -8662,13 +8335,8 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
|
||||
* w/o tripping rebalance_share or load_balance_fair.
|
||||
*/
|
||||
tg->shares = shares;
|
||||
for_each_possible_cpu(i) {
|
||||
/*
|
||||
* force a rebalance
|
||||
*/
|
||||
cfs_rq_set_shares(tg->cfs_rq[i], 0);
|
||||
for_each_possible_cpu(i)
|
||||
set_se_shares(tg->se[i], shares);
|
||||
}
|
||||
|
||||
/*
|
||||
* Enable load balance activity on this group, by inserting it back on
|
||||
@@ -8707,7 +8375,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
|
||||
#ifdef CONFIG_CGROUP_SCHED
|
||||
static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
|
||||
{
|
||||
struct task_group *tgi, *parent = tg->parent;
|
||||
struct task_group *tgi, *parent = tg ? tg->parent : NULL;
|
||||
unsigned long total = 0;
|
||||
|
||||
if (!parent) {
|
||||
@@ -8834,6 +8502,9 @@ int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
|
||||
rt_period = (u64)rt_period_us * NSEC_PER_USEC;
|
||||
rt_runtime = tg->rt_bandwidth.rt_runtime;
|
||||
|
||||
if (rt_period == 0)
|
||||
return -EINVAL;
|
||||
|
||||
return tg_set_bandwidth(tg, rt_period, rt_runtime);
|
||||
}
|
||||
|
||||
|
||||
@@ -59,22 +59,26 @@ static inline struct sched_clock_data *cpu_sdc(int cpu)
|
||||
return &per_cpu(sched_clock_data, cpu);
|
||||
}
|
||||
|
||||
static __read_mostly int sched_clock_running;
|
||||
|
||||
void sched_clock_init(void)
|
||||
{
|
||||
u64 ktime_now = ktime_to_ns(ktime_get());
|
||||
u64 now = 0;
|
||||
unsigned long now_jiffies = jiffies;
|
||||
int cpu;
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
struct sched_clock_data *scd = cpu_sdc(cpu);
|
||||
|
||||
scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
|
||||
scd->prev_jiffies = jiffies;
|
||||
scd->prev_raw = now;
|
||||
scd->tick_raw = now;
|
||||
scd->prev_jiffies = now_jiffies;
|
||||
scd->prev_raw = 0;
|
||||
scd->tick_raw = 0;
|
||||
scd->tick_gtod = ktime_now;
|
||||
scd->clock = ktime_now;
|
||||
}
|
||||
|
||||
sched_clock_running = 1;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -136,6 +140,9 @@ u64 sched_clock_cpu(int cpu)
|
||||
struct sched_clock_data *scd = cpu_sdc(cpu);
|
||||
u64 now, clock;
|
||||
|
||||
if (unlikely(!sched_clock_running))
|
||||
return 0ull;
|
||||
|
||||
WARN_ON_ONCE(!irqs_disabled());
|
||||
now = sched_clock();
|
||||
|
||||
@@ -174,6 +181,9 @@ void sched_clock_tick(void)
|
||||
struct sched_clock_data *scd = this_scd();
|
||||
u64 now, now_gtod;
|
||||
|
||||
if (unlikely(!sched_clock_running))
|
||||
return;
|
||||
|
||||
WARN_ON_ONCE(!irqs_disabled());
|
||||
|
||||
now = sched_clock();
|
||||
|
||||
@@ -167,11 +167,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
|
||||
#endif
|
||||
SEQ_printf(m, " .%-30s: %ld\n", "nr_spread_over",
|
||||
cfs_rq->nr_spread_over);
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
#ifdef CONFIG_SMP
|
||||
SEQ_printf(m, " .%-30s: %lu\n", "shares", cfs_rq->shares);
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
|
||||
static void print_cpu(struct seq_file *m, int cpu)
|
||||
|
||||
@@ -333,34 +333,6 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* delta *= w / rw
|
||||
*/
|
||||
static inline unsigned long
|
||||
calc_delta_weight(unsigned long delta, struct sched_entity *se)
|
||||
{
|
||||
for_each_sched_entity(se) {
|
||||
delta = calc_delta_mine(delta,
|
||||
se->load.weight, &cfs_rq_of(se)->load);
|
||||
}
|
||||
|
||||
return delta;
|
||||
}
|
||||
|
||||
/*
|
||||
* delta *= rw / w
|
||||
*/
|
||||
static inline unsigned long
|
||||
calc_delta_fair(unsigned long delta, struct sched_entity *se)
|
||||
{
|
||||
for_each_sched_entity(se) {
|
||||
delta = calc_delta_mine(delta,
|
||||
cfs_rq_of(se)->load.weight, &se->load);
|
||||
}
|
||||
|
||||
return delta;
|
||||
}
|
||||
|
||||
/*
|
||||
* The idea is to set a period in which each task runs once.
|
||||
*
|
||||
@@ -390,54 +362,47 @@ static u64 __sched_period(unsigned long nr_running)
|
||||
*/
|
||||
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
|
||||
{
|
||||
return calc_delta_weight(__sched_period(cfs_rq->nr_running), se);
|
||||
u64 slice = __sched_period(cfs_rq->nr_running);
|
||||
|
||||
for_each_sched_entity(se) {
|
||||
cfs_rq = cfs_rq_of(se);
|
||||
|
||||
slice *= se->load.weight;
|
||||
do_div(slice, cfs_rq->load.weight);
|
||||
}
|
||||
|
||||
|
||||
return slice;
|
||||
}
|
||||
|
||||
/*
|
||||
* We calculate the vruntime slice of a to be inserted task
|
||||
*
|
||||
* vs = s*rw/w = p
|
||||
* vs = s/w = p/rw
|
||||
*/
|
||||
static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
|
||||
{
|
||||
unsigned long nr_running = cfs_rq->nr_running;
|
||||
unsigned long weight;
|
||||
u64 vslice;
|
||||
|
||||
if (!se->on_rq)
|
||||
nr_running++;
|
||||
|
||||
return __sched_period(nr_running);
|
||||
}
|
||||
|
||||
/*
|
||||
* The goal of calc_delta_asym() is to be asymmetrically around NICE_0_LOAD, in
|
||||
* that it favours >=0 over <0.
|
||||
*
|
||||
* -20 |
|
||||
* |
|
||||
* 0 --------+-------
|
||||
* .'
|
||||
* 19 .'
|
||||
*
|
||||
*/
|
||||
static unsigned long
|
||||
calc_delta_asym(unsigned long delta, struct sched_entity *se)
|
||||
{
|
||||
struct load_weight lw = {
|
||||
.weight = NICE_0_LOAD,
|
||||
.inv_weight = 1UL << (WMULT_SHIFT-NICE_0_SHIFT)
|
||||
};
|
||||
vslice = __sched_period(nr_running);
|
||||
|
||||
for_each_sched_entity(se) {
|
||||
struct load_weight *se_lw = &se->load;
|
||||
cfs_rq = cfs_rq_of(se);
|
||||
|
||||
if (se->load.weight < NICE_0_LOAD)
|
||||
se_lw = &lw;
|
||||
weight = cfs_rq->load.weight;
|
||||
if (!se->on_rq)
|
||||
weight += se->load.weight;
|
||||
|
||||
delta = calc_delta_mine(delta,
|
||||
cfs_rq_of(se)->load.weight, se_lw);
|
||||
vslice *= NICE_0_LOAD;
|
||||
do_div(vslice, weight);
|
||||
}
|
||||
|
||||
return delta;
|
||||
return vslice;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -454,7 +419,11 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
|
||||
|
||||
curr->sum_exec_runtime += delta_exec;
|
||||
schedstat_add(cfs_rq, exec_clock, delta_exec);
|
||||
delta_exec_weighted = calc_delta_fair(delta_exec, curr);
|
||||
delta_exec_weighted = delta_exec;
|
||||
if (unlikely(curr->load.weight != NICE_0_LOAD)) {
|
||||
delta_exec_weighted = calc_delta_fair(delta_exec_weighted,
|
||||
&curr->load);
|
||||
}
|
||||
curr->vruntime += delta_exec_weighted;
|
||||
}
|
||||
|
||||
@@ -541,27 +510,10 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
|
||||
* Scheduling class queueing methods:
|
||||
*/
|
||||
|
||||
#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
|
||||
static void
|
||||
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
|
||||
{
|
||||
cfs_rq->task_weight += weight;
|
||||
}
|
||||
#else
|
||||
static inline void
|
||||
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
static void
|
||||
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
|
||||
{
|
||||
update_load_add(&cfs_rq->load, se->load.weight);
|
||||
if (!parent_entity(se))
|
||||
inc_cpu_load(rq_of(cfs_rq), se->load.weight);
|
||||
if (entity_is_task(se))
|
||||
add_cfs_task_weight(cfs_rq, se->load.weight);
|
||||
cfs_rq->nr_running++;
|
||||
se->on_rq = 1;
|
||||
list_add(&se->group_node, &cfs_rq->tasks);
|
||||
@@ -571,10 +523,6 @@ static void
|
||||
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
|
||||
{
|
||||
update_load_sub(&cfs_rq->load, se->load.weight);
|
||||
if (!parent_entity(se))
|
||||
dec_cpu_load(rq_of(cfs_rq), se->load.weight);
|
||||
if (entity_is_task(se))
|
||||
add_cfs_task_weight(cfs_rq, -se->load.weight);
|
||||
cfs_rq->nr_running--;
|
||||
se->on_rq = 0;
|
||||
list_del_init(&se->group_node);
|
||||
@@ -661,17 +609,8 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
|
||||
|
||||
if (!initial) {
|
||||
/* sleeps upto a single latency don't count. */
|
||||
if (sched_feat(NEW_FAIR_SLEEPERS)) {
|
||||
unsigned long thresh = sysctl_sched_latency;
|
||||
|
||||
/*
|
||||
* convert the sleeper threshold into virtual time
|
||||
*/
|
||||
if (sched_feat(NORMALIZED_SLEEPER))
|
||||
thresh = calc_delta_fair(thresh, se);
|
||||
|
||||
vruntime -= thresh;
|
||||
}
|
||||
if (sched_feat(NEW_FAIR_SLEEPERS))
|
||||
vruntime -= sysctl_sched_latency;
|
||||
|
||||
/* ensure we never gain time by being placed backwards. */
|
||||
vruntime = max_vruntime(se->vruntime, vruntime);
|
||||
@@ -1057,24 +996,11 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
struct task_struct *curr = this_rq->curr;
unsigned long tl = this_load;
unsigned long tl_per_task;
int balanced;

if (!(this_sd->flags & SD_WAKE_AFFINE))
if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
return 0;

/*
* If the currently running task will sleep within
* a reasonable amount of time then attract this newly
* woken task:
*/
if (sync && curr->sched_class == &fair_sched_class) {
if (curr->se.avg_overlap < sysctl_sched_migration_cost &&
p->se.avg_overlap < sysctl_sched_migration_cost)
return 1;
}

schedstat_inc(p, se.nr_wakeups_affine_attempts);
tl_per_task = cpu_avg_load_per_task(this_cpu);

/*
* If sync wakeup then subtract the (maximum possible)
* effect of the currently running task from the load
@@ -1083,8 +1009,24 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
if (sync)
tl -= current->se.load.weight;

balanced = 100*(tl + p->se.load.weight) <= imbalance*load;

/*
* If the currently running task will sleep within
* a reasonable amount of time then attract this newly
* woken task:
*/
if (sync && balanced && curr->sched_class == &fair_sched_class) {
if (curr->se.avg_overlap < sysctl_sched_migration_cost &&
p->se.avg_overlap < sysctl_sched_migration_cost)
return 1;
}

schedstat_inc(p, se.nr_wakeups_affine_attempts);
tl_per_task = cpu_avg_load_per_task(this_cpu);

if ((tl <= load && tl + target_load(prev_cpu, idx) <= tl_per_task) ||
100*(tl + p->se.load.weight) <= imbalance*load) {
balanced) {
/*
* This domain has SD_WAKE_AFFINE and
* p is cache cold in this domain, and
@@ -1169,10 +1111,11 @@ static unsigned long wakeup_gran(struct sched_entity *se)
unsigned long gran = sysctl_sched_wakeup_granularity;

/*
* More easily preempt - nice tasks, while not making it harder for
* + nice tasks.
* More easily preempt - nice tasks, while not making
* it harder for + nice tasks.
*/
gran = calc_delta_asym(sysctl_sched_wakeup_granularity, se);
if (unlikely(se->load.weight > NICE_0_LOAD))
gran = calc_delta_fair(gran, &se->load);

return gran;
}
@@ -1366,90 +1309,75 @@ static struct task_struct *load_balance_next_fair(void *arg)
return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
}

static unsigned long
__load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
unsigned long max_load_move, struct sched_domain *sd,
enum cpu_idle_type idle, int *all_pinned, int *this_best_prio,
struct cfs_rq *cfs_rq)
#ifdef CONFIG_FAIR_GROUP_SCHED
static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
{
struct sched_entity *curr;
struct task_struct *p;

if (!cfs_rq->nr_running || !first_fair(cfs_rq))
return MAX_PRIO;

curr = cfs_rq->curr;
if (!curr)
curr = __pick_next_entity(cfs_rq);

p = task_of(curr);

return p->prio;
}
#endif

static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
unsigned long max_load_move,
struct sched_domain *sd, enum cpu_idle_type idle,
int *all_pinned, int *this_best_prio)
{
struct cfs_rq *busy_cfs_rq;
long rem_load_move = max_load_move;
struct rq_iterator cfs_rq_iterator;

cfs_rq_iterator.start = load_balance_start_fair;
cfs_rq_iterator.next = load_balance_next_fair;
cfs_rq_iterator.arg = cfs_rq;

return balance_tasks(this_rq, this_cpu, busiest,
max_load_move, sd, idle, all_pinned,
this_best_prio, &cfs_rq_iterator);
}

for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
#ifdef CONFIG_FAIR_GROUP_SCHED
static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
unsigned long max_load_move,
struct sched_domain *sd, enum cpu_idle_type idle,
int *all_pinned, int *this_best_prio)
{
long rem_load_move = max_load_move;
int busiest_cpu = cpu_of(busiest);
struct task_group *tg;

rcu_read_lock();
list_for_each_entry(tg, &task_groups, list) {
struct cfs_rq *this_cfs_rq;
long imbalance;
unsigned long this_weight, busiest_weight;
long rem_load, max_load, moved_load;
unsigned long maxload;

this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);

imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
/* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
if (imbalance <= 0)
continue;

/* Don't pull more than imbalance/2 */
imbalance /= 2;
maxload = min(rem_load_move, imbalance);

*this_best_prio = cfs_rq_best_prio(this_cfs_rq);
#else
# define maxload rem_load_move
#endif
/*
* empty group
* pass busy_cfs_rq argument into
* load_balance_[start|next]_fair iterators
*/
if (!aggregate(tg, sd)->task_weight)
continue;
cfs_rq_iterator.arg = busy_cfs_rq;
rem_load_move -= balance_tasks(this_rq, this_cpu, busiest,
maxload, sd, idle, all_pinned,
this_best_prio,
&cfs_rq_iterator);

rem_load = rem_load_move * aggregate(tg, sd)->rq_weight;
rem_load /= aggregate(tg, sd)->load + 1;

this_weight = tg->cfs_rq[this_cpu]->task_weight;
busiest_weight = tg->cfs_rq[busiest_cpu]->task_weight;

imbalance = (busiest_weight - this_weight) / 2;

if (imbalance < 0)
imbalance = busiest_weight;

max_load = max(rem_load, imbalance);
moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
max_load, sd, idle, all_pinned, this_best_prio,
tg->cfs_rq[busiest_cpu]);

if (!moved_load)
continue;

move_group_shares(tg, sd, busiest_cpu, this_cpu);

moved_load *= aggregate(tg, sd)->load;
moved_load /= aggregate(tg, sd)->rq_weight + 1;

rem_load_move -= moved_load;
if (rem_load_move < 0)
if (rem_load_move <= 0)
break;
}
rcu_read_unlock();

return max_load_move - rem_load_move;
}
#else
static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
unsigned long max_load_move,
struct sched_domain *sd, enum cpu_idle_type idle,
int *all_pinned, int *this_best_prio)
{
return __load_balance_fair(this_rq, this_cpu, busiest,
max_load_move, sd, idle, all_pinned,
this_best_prio, &busiest->cfs);
}
#endif

static int
move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,

@@ -250,7 +250,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
if (rt_rq->rt_time || rt_rq->rt_nr_running)
idle = 0;
spin_unlock(&rt_rq->rt_runtime_lock);
}
} else if (rt_rq->rt_nr_running)
idle = 0;

if (enqueue)
sched_rt_rq_enqueue(rt_rq);
@@ -449,13 +450,19 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
#endif
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
{
struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
struct rt_prio_array *array = &rt_rq->active;
struct rt_rq *group_rq = group_rt_rq(rt_se);

if (group_rq && rt_rq_throttled(group_rq))
/*
* Don't enqueue the group if its throttled, or when empty.
* The latter is a consequence of the former when a child group
* get throttled and the current group doesn't have any other
* active members.
*/
if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
return;

list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
@@ -464,7 +471,7 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
inc_rt_tasks(rt_se, rt_rq);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
struct rt_prio_array *array = &rt_rq->active;
@@ -480,11 +487,10 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
* Because the prio of an upper entry depends on the lower
* entries, we must remove entries top - down.
*/
static void dequeue_rt_stack(struct task_struct *p)
static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
{
struct sched_rt_entity *rt_se, *back = NULL;
struct sched_rt_entity *back = NULL;

rt_se = &p->rt;
for_each_sched_rt_entity(rt_se) {
rt_se->back = back;
back = rt_se;
@@ -492,7 +498,26 @@ static void dequeue_rt_stack(struct task_struct *p)

for (rt_se = back; rt_se; rt_se = rt_se->back) {
if (on_rt_rq(rt_se))
dequeue_rt_entity(rt_se);
__dequeue_rt_entity(rt_se);
}
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
{
dequeue_rt_stack(rt_se);
for_each_sched_rt_entity(rt_se)
__enqueue_rt_entity(rt_se);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
dequeue_rt_stack(rt_se);

for_each_sched_rt_entity(rt_se) {
struct rt_rq *rt_rq = group_rt_rq(rt_se);

if (rt_rq && rt_rq->rt_nr_running)
__enqueue_rt_entity(rt_se);
}
}

@@ -506,36 +531,15 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
if (wakeup)
rt_se->timeout = 0;

dequeue_rt_stack(p);

/*
* enqueue everybody, bottom - up.
*/
for_each_sched_rt_entity(rt_se)
enqueue_rt_entity(rt_se);

inc_cpu_load(rq, p->se.load.weight);
enqueue_rt_entity(rt_se);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
struct sched_rt_entity *rt_se = &p->rt;
struct rt_rq *rt_rq;

update_curr_rt(rq);

dequeue_rt_stack(p);

/*
* re-enqueue all non-empty rt_rq entities.
*/
for_each_sched_rt_entity(rt_se) {
rt_rq = group_rt_rq(rt_se);
if (rt_rq && rt_rq->rt_nr_running)
enqueue_rt_entity(rt_se);
}

dec_cpu_load(rq, p->se.load.weight);
dequeue_rt_entity(rt_se);
}

/*
@@ -546,8 +550,10 @@ static
void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
{
struct rt_prio_array *array = &rt_rq->active;
struct list_head *queue = array->queue + rt_se_prio(rt_se);

list_move_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
if (on_rt_rq(rt_se))
list_move_tail(&rt_se->run_list, queue);
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p)

@@ -67,6 +67,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
preempt_enable();
#endif
}
kfree(mask_str);
return 0;
}

@@ -197,6 +198,9 @@ static inline void sched_info_queued(struct task_struct *t)
/*
* Called when a process ceases being the active-running process, either
* voluntarily or involuntarily. Now we can calculate how long we ran.
* Also, if the process is still in the TASK_RUNNING state, call
* sched_info_queued() to mark that it has now again started waiting on
* the runqueue.
*/
static inline void sched_info_depart(struct task_struct *t)
{
@@ -205,6 +209,9 @@ static inline void sched_info_depart(struct task_struct *t)

t->sched_info.cpu_time += delta;
rq_sched_info_depart(task_rq(t), delta);

if (t->state == TASK_RUNNING)
sched_info_queued(t);
}

/*

@@ -231,6 +231,40 @@ void flush_signals(struct task_struct *t)
spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

static void __flush_itimer_signals(struct sigpending *pending)
{
sigset_t signal, retain;
struct sigqueue *q, *n;

signal = pending->signal;
sigemptyset(&retain);

list_for_each_entry_safe(q, n, &pending->list, list) {
int sig = q->info.si_signo;

if (likely(q->info.si_code != SI_TIMER)) {
sigaddset(&retain, sig);
} else {
sigdelset(&signal, sig);
list_del_init(&q->list);
__sigqueue_free(q);
}
}

sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
struct task_struct *tsk = current;
unsigned long flags;

spin_lock_irqsave(&tsk->sighand->siglock, flags);
__flush_itimer_signals(&tsk->pending);
__flush_itimer_signals(&tsk->signal->shared_pending);
spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
int i;
@@ -1240,17 +1274,22 @@ void sigqueue_free(struct sigqueue *q)

BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
/*
* If the signal is still pending remove it from the
* pending queue. We must hold ->siglock while testing
* q->list to serialize with collect_signal().
* We must hold ->siglock while testing q->list
* to serialize with collect_signal() or with
* __exit_signal()->flush_sigqueue().
*/
spin_lock_irqsave(lock, flags);
q->flags &= ~SIGQUEUE_PREALLOC;
/*
* If it is queued it will be freed when dequeued,
* like the "regular" sigqueue.
*/
if (!list_empty(&q->list))
list_del_init(&q->list);
q = NULL;
spin_unlock_irqrestore(lock, flags);

q->flags &= ~SIGQUEUE_PREALLOC;
__sigqueue_free(q);
if (q)
__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)

@@ -49,12 +49,17 @@ static unsigned long get_timestamp(int this_cpu)
return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */
}

void touch_softlockup_watchdog(void)
static void __touch_softlockup_watchdog(void)
{
int this_cpu = raw_smp_processor_id();

__raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
}

void touch_softlockup_watchdog(void)
{
__raw_get_cpu_var(touch_timestamp) = 0;
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
@@ -80,7 +85,7 @@ void softlockup_tick(void)
unsigned long now;

if (touch_timestamp == 0) {
touch_softlockup_watchdog();
__touch_softlockup_watchdog();
return;
}

@@ -95,7 +100,7 @@ void softlockup_tick(void)

/* do not print during early bootup: */
if (unlikely(system_state != SYSTEM_RUNNING)) {
touch_softlockup_watchdog();
__touch_softlockup_watchdog();
return;
}

@@ -115,6 +120,7 @@ void softlockup_tick(void)
printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
this_cpu, now - touch_timestamp,
current->comm, task_pid_nr(current));
print_modules();
if (regs)
show_regs(regs);
else
@@ -214,7 +220,7 @@ static int watchdog(void *__bind_cpu)
sched_setscheduler(current, SCHED_FIFO, &param);

/* initialize timestamp */
touch_softlockup_watchdog();
__touch_softlockup_watchdog();

set_current_state(TASK_INTERRUPTIBLE);
/*
@@ -223,7 +229,7 @@ static int watchdog(void *__bind_cpu)
* debug-printout triggers in softlockup_tick().
*/
while (!kthread_should_stop()) {
touch_softlockup_watchdog();
__touch_softlockup_watchdog();
schedule();

if (kthread_should_stop())

@@ -62,8 +62,7 @@ static int stopmachine(void *cpu)
* help our sisters onto their CPUs. */
if (!prepared && !irqs_disabled)
yield();
else
cpu_relax();
cpu_relax();
}

/* Ack: we are exiting. */
@@ -106,8 +105,10 @@ static int stop_machine(void)
}

/* Wait for them all to come to life. */
while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads) {
yield();
cpu_relax();
}

/* If some failed, kill them all. */
if (ret < 0) {

@@ -1652,7 +1652,7 @@ asmlinkage long sys_umask(int mask)
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5)
{
long uninitialized_var(error);
long error = 0;

if (security_task_prctl(option, arg2, arg3, arg4, arg5, &error))
return error;
@@ -1701,9 +1701,7 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
error = PR_TIMING_STATISTICAL;
break;
case PR_SET_TIMING:
if (arg2 == PR_TIMING_STATISTICAL)
error = 0;
else
if (arg2 != PR_TIMING_STATISTICAL)
error = -EINVAL;
break;

@@ -13,7 +13,7 @@
* Kai Petzke <wpp@marie.physik.tu-berlin.de>
* Theodore Ts'o <tytso@mit.edu>
*
* Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
* Made to use alloc_percpu by Christoph Lameter.
*/

#include <linux/module.h>