sched: TIF_NEED_RESCHED -> need_resched() cleanup
Impact: cleanup Use test_tsk_need_resched(), set_tsk_need_resched(), need_resched() instead of using TIF_NEED_RESCHED. Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> LKML-Reference: <49B10BA4.9070209@cn.fujitsu.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
		
							parent
							
								
									7fc07d8410
								
							
						
					
					
						commit
						5ed0cec0ac
					
				| @ -1189,10 +1189,10 @@ static void resched_task(struct task_struct *p) | ||||
| 
 | ||||
| 	assert_spin_locked(&task_rq(p)->lock); | ||||
| 
 | ||||
| 	if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED))) | ||||
| 	if (test_tsk_need_resched(p)) | ||||
| 		return; | ||||
| 
 | ||||
| 	set_tsk_thread_flag(p, TIF_NEED_RESCHED); | ||||
| 	set_tsk_need_resched(p); | ||||
| 
 | ||||
| 	cpu = task_cpu(p); | ||||
| 	if (cpu == smp_processor_id()) | ||||
| @ -1248,7 +1248,7 @@ void wake_up_idle_cpu(int cpu) | ||||
| 	 * lockless. The worst case is that the other CPU runs the | ||||
| 	 * idle task through an additional NOOP schedule() | ||||
| 	 */ | ||||
| 	set_tsk_thread_flag(rq->idle, TIF_NEED_RESCHED); | ||||
| 	set_tsk_need_resched(rq->idle); | ||||
| 
 | ||||
| 	/* NEED_RESCHED must be visible before we test polling */ | ||||
| 	smp_mb(); | ||||
| @ -4740,7 +4740,7 @@ asmlinkage void __sched preempt_schedule(void) | ||||
| 		 * between schedule and now. | ||||
| 		 */ | ||||
| 		barrier(); | ||||
| 	} while (unlikely(test_thread_flag(TIF_NEED_RESCHED))); | ||||
| 	} while (need_resched()); | ||||
| } | ||||
| EXPORT_SYMBOL(preempt_schedule); | ||||
| 
 | ||||
| @ -4769,7 +4769,7 @@ asmlinkage void __sched preempt_schedule_irq(void) | ||||
| 		 * between schedule and now. | ||||
| 		 */ | ||||
| 		barrier(); | ||||
| 	} while (unlikely(test_thread_flag(TIF_NEED_RESCHED))); | ||||
| 	} while (need_resched()); | ||||
| } | ||||
| 
 | ||||
| #endif /* CONFIG_PREEMPT */ | ||||
|  | ||||
| @ -39,7 +39,7 @@ static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag); | ||||
| int __lockfunc __reacquire_kernel_lock(void) | ||||
| { | ||||
| 	while (!_raw_spin_trylock(&kernel_flag)) { | ||||
| 		if (test_thread_flag(TIF_NEED_RESCHED)) | ||||
| 		if (need_resched()) | ||||
| 			return -EAGAIN; | ||||
| 		cpu_relax(); | ||||
| 	} | ||||
|  | ||||
		Loading…
	
		Reference in New Issue
	
	Block a user