Merge branch 'sched/urgent' into sched/core, to merge fixes before applying new changes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/core.c
@@ -3573,9 +3573,10 @@ static int _sched_setscheduler(struct task_struct *p, int policy,
 	};
 
 	/*
-	 * Fixup the legacy SCHED_RESET_ON_FORK hack
+	 * Fixup the legacy SCHED_RESET_ON_FORK hack, except if
+	 * the policy=-1 was passed by sched_setparam().
 	 */
-	if (policy & SCHED_RESET_ON_FORK) {
+	if ((policy != -1) && (policy & SCHED_RESET_ON_FORK)) {
 		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
 		policy &= ~SCHED_RESET_ON_FORK;
 		attr.sched_policy = policy;
@@ -4162,7 +4163,6 @@ static void __cond_resched(void)
 
 int __sched _cond_resched(void)
 {
-	rcu_cond_resched();
 	if (should_resched()) {
 		__cond_resched();
 		return 1;
@@ -4181,18 +4181,15 @@ EXPORT_SYMBOL(_cond_resched);
  */
int __cond_resched_lock(spinlock_t *lock)
 {
-	bool need_rcu_resched = rcu_should_resched();
 	int resched = should_resched();
 	int ret = 0;
 
 	lockdep_assert_held(lock);
 
-	if (spin_needbreak(lock) || resched || need_rcu_resched) {
+	if (spin_needbreak(lock) || resched) {
 		spin_unlock(lock);
 		if (resched)
 			__cond_resched();
-		else if (unlikely(need_rcu_resched))
-			rcu_resched();
 		else
 			cpu_relax();
 		ret = 1;
@@ -4206,7 +4203,6 @@ int __sched __cond_resched_softirq(void)
 {
 	BUG_ON(!in_softirq());
 
-	rcu_cond_resched(); /* BH disabled OK, just recording QSes. */
 	if (should_resched()) {
 		local_bh_enable();
 		__cond_resched();