Merge branch 'x86/idle' into sched/core
Merge these x86-specific bits - we are going to add generic bits as well.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit c9c8986847
arch/x86/include/asm/mwait.h

@@ -1,6 +1,8 @@
#ifndef _ASM_X86_MWAIT_H
#define _ASM_X86_MWAIT_H

#include <linux/sched.h>

#define MWAIT_SUBSTATE_MASK		0xf
#define MWAIT_CSTATE_MASK		0xf
#define MWAIT_SUBSTATE_SIZE		4

@@ -13,4 +15,45 @@

#define MWAIT_ECX_INTERRUPT_BREAK	0x1

static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/* "monitor %eax, %ecx, %edx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc8;"
		     :: "a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax, %ecx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}

/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{
	if (!current_set_polling_and_test()) {
		if (static_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) {
			mb();
			clflush((void *)&current_thread_info()->flags);
			mb();
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			__mwait(eax, ecx);
	}
	__current_clr_polling();
}

#endif /* _ASM_X86_MWAIT_H */

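The comment block above describes the contract: arm MONITOR on the thread flags, then MWAIT until either a write to the monitored line or an interrupt wakes us. As a quick illustration of how a caller is expected to use the new helper, here is a minimal sketch; the function name is hypothetical and the hint values are illustrative (EAX 0x00 asks for the shallowest C-state, MWAIT_ECX_INTERRUPT_BREAK makes a pending interrupt break the wait even with interrupts masked):

#include <asm/mwait.h>

/* Hypothetical caller, not part of this commit. */
static void example_c1_idle(void)
{
	/*
	 * The EAX hint encodes the target C-state (bits 7:4) and
	 * sub-state (bits 3:0); 0x00 selects the shallowest state.
	 * ECX bit 0 requests wakeup on an interrupt even when
	 * interrupts are masked.
	 */
	mwait_idle_with_hints(0x00, MWAIT_ECX_INTERRUPT_BREAK);
}
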
arch/x86/include/asm/processor.h

@@ -700,29 +700,6 @@ static inline void sync_core(void)
#endif
}

static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/* "monitor %eax, %ecx, %edx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc8;"
		     :: "a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax, %ecx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}

static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	trace_hardirqs_on();
	/* "mwait %eax, %ecx;" */
	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void init_amd_e400_c1e_mask(void);

arch/x86/kernel/acpi/cstate.c

@@ -150,29 +150,6 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);

/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
	if (!need_resched()) {
		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
			clflush((void *)&current_thread_info()->flags);

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__mwait(ax, cx);
	}
}

void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
{
	unsigned int cpu = smp_processor_id();

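Note the behavioural difference from the copy being removed above: the old function paired a raw smp_mb() with a second need_resched() check, while the new inline in mwait.h first sets TIF_POLLING_NRFLAG via current_set_polling_and_test(), telling the scheduler it can set TIF_NEED_RESCHED without sending a wakeup IPI. A rough sketch of that handshake, assuming simplified helper bodies (the real implementations live in <linux/sched.h> and handle more corner cases):

#include <linux/thread_info.h>

/* Simplified sketch; not the kernel's exact implementation. */
static inline bool sketch_set_polling_and_test(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);	/* "I am polling, skip the IPI" */
	smp_mb();	/* order the flag store against the flag test below */
	return test_thread_flag(TIF_NEED_RESCHED);
}

static inline void sketch_clr_polling(void)
{
	clear_thread_flag(TIF_POLLING_NRFLAG);
}
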
arch/x86/kernel/smpboot.c

@@ -1417,7 +1417,9 @@ static inline void mwait_play_dead(void)
		 * The WBINVD is insufficient due to the spurious-wakeup
		 * case where we return around the loop.
		 */
		mb();
		clflush(mwait_ptr);
		mb();
		__monitor(mwait_ptr, 0, 0);
		mb();
		__mwait(eax, 0);

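The added mb()/clflush()/mb() pair mirrors the X86_FEATURE_CLFLUSH_MONITOR workaround in mwait_idle_with_hints above; spelling out the intended ordering (the code is from the hunk, the annotations are editorial):

	mb();				/* complete prior stores before the flush */
	clflush(mwait_ptr);		/* erratum workaround: evict the monitored line */
	mb();				/* flush settles before MONITOR arms */
	__monitor(mwait_ptr, 0, 0);	/* arm monitoring on the target line */
	mb();				/* MONITOR is armed before we commit to waiting */
	__mwait(eax, 0);		/* sleep until the line is written (or spurious wakeup) */
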
drivers/acpi/acpi_pad.c

@@ -193,10 +193,7 @@ static int power_saving_thread(void *data)
					CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
			stop_critical_timings();

			__monitor((void *)&current_thread_info()->flags, 0, 0);
			smp_mb();
			if (!need_resched())
				__mwait(power_saving_mwait_eax, 1);
			mwait_idle_with_hints(power_saving_mwait_eax, 1);

			start_critical_timings();
			if (lapic_marked_unstable)

drivers/acpi/processor_idle.c

@@ -727,11 +727,6 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
	if (unlikely(!pr))
		return -EINVAL;

	if (cx->entry_method == ACPI_CSTATE_FFH) {
		if (current_set_polling_and_test())
			return -EINVAL;
	}

	lapic_timer_state_broadcast(pr, cx, 1);
	acpi_idle_do_entry(cx);

@@ -785,11 +780,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
	if (unlikely(!pr))
		return -EINVAL;

	if (cx->entry_method == ACPI_CSTATE_FFH) {
		if (current_set_polling_and_test())
			return -EINVAL;
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
@@ -841,11 +831,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
		}
	}

	if (cx->entry_method == ACPI_CSTATE_FFH) {
		if (current_set_polling_and_test())
			return -EINVAL;
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */

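All three deletions follow from the consolidation: for ACPI_CSTATE_FFH entry, the polling-flag setup, the need_resched() re-check and the polling clear now happen inside mwait_idle_with_hints() itself, so the acpi_idle_enter_* callers no longer need a per-call-site guard. Roughly, the FFH path reduces to this shape (simplified and hypothetical; the real path runs through acpi_idle_do_entry()):

/* Hypothetical, simplified FFH entry; parameter names illustrative. */
static void sketch_ffh_idle_entry(unsigned long eax_hint, unsigned long ecx_hint)
{
	/* polling set/clear and the need_resched() re-check are internal: */
	mwait_idle_with_hints(eax_hint, ecx_hint);
}
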
drivers/idle/intel_idle.c

@@ -377,16 +377,7 @@ static int intel_idle(struct cpuidle_device *dev,
	if (!(lapic_timer_reliable_states & (1 << (cstate))))
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

	if (!current_set_polling_and_test()) {

		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
			clflush((void *)&current_thread_info()->flags);

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__mwait(eax, ecx);
	}
	mwait_idle_with_hints(eax, ecx);

	if (!(lapic_timer_reliable_states & (1 << (cstate))))
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);

drivers/thermal/intel_powerclamp.c

@@ -438,9 +438,7 @@ static int clamp_thread(void *arg)
			 */
			local_touch_nmi();
			stop_critical_timings();
			__monitor((void *)&current_thread_info()->flags, 0, 0);
			cpu_relax(); /* allow HT sibling to run */
			__mwait(eax, ecx);
			mwait_idle_with_hints(eax, ecx);
			start_critical_timings();
			atomic_inc(&idle_wakeup_counter);
		}