x86/msr: Allow rdmsr_safe_on_cpu() to schedule
High latencies can be observed when a daemon periodically reads various
MSRs on all CPUs. On KASAN-enabled kernels, ~10ms latencies can be observed
simply by reading one MSR. Even without KASAN, sending an IPI to a CPU that
is in a deep sleep state or in a long hard-IRQ-disabled section, then
waiting for the answer, can consume hundreds of microseconds.

All usage sites are in preemptible context, so convert rdmsr_safe_on_cpu()
to use a completion instead of busy polling.

Overall daemon CPU usage was reduced by 35%, and the latencies caused by
msr_read() disappeared.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Ingo Molnar <mingo@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Link: https://lkml.kernel.org/r/20180323215818.127774-1-edumazet@google.com
commit 07cde313b2 (parent 13cc36d76b)
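The core of the change is the pattern visible in the diff below: instead of
smp_call_function_single() with wait=1, which busy-waits until the remote CPU
has serviced the IPI, the caller issues the IPI with
smp_call_function_single_async() and sleeps on a completion that the IPI
handler signals. A minimal, generic sketch of that pattern (everything except
the kernel smp/completion APIs is an illustrative name, not code from this
commit):

#include <linux/completion.h>
#include <linux/smp.h>

/* Illustrative example of the async-IPI + completion pattern. */
struct remote_work {
	struct completion	done;
	int			result;
};

static void remote_fn(void *info)
{
	struct remote_work *w = info;	/* runs in hard-IRQ context on the target CPU */

	w->result = 0;			/* do the actual per-CPU work here */
	complete(&w->done);		/* wake the sleeping caller */
}

/* Must run in preemptible context: wait_for_completion() may sleep. */
static int run_on_cpu(unsigned int cpu)
{
	struct remote_work w;
	call_single_data_t csd = {
		.func	= remote_fn,
		.info	= &w,
	};
	int err;

	init_completion(&w.done);

	err = smp_call_function_single_async(cpu, &csd);
	if (err)
		return err;		/* e.g. target CPU is offline */

	/* Sleep instead of busy polling; csd and w stay valid on our stack. */
	wait_for_completion(&w.done);
	return w.result;
}

In the actual patch the per-CPU work is rdmsr_safe() and the container struct
is msr_info_completion, as shown in the hunks below.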
					
@@ -2,6 +2,7 @@
 #include <linux/export.h>
 #include <linux/preempt.h>
 #include <linux/smp.h>
+#include <linux/completion.h>
 #include <asm/msr.h>
 
 static void __rdmsr_on_cpu(void *info)
@@ -143,13 +144,19 @@ void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
 }
 EXPORT_SYMBOL(wrmsr_on_cpus);
 
+struct msr_info_completion {
+	struct msr_info		msr;
+	struct completion	done;
+};
+
 /* These "safe" variants are slower and should be used when the target MSR
    may not actually exist. */
 static void __rdmsr_safe_on_cpu(void *info)
 {
-	struct msr_info *rv = info;
+	struct msr_info_completion *rv = info;
 
-	rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
+	rv->msr.err = rdmsr_safe(rv->msr.msr_no, &rv->msr.reg.l, &rv->msr.reg.h);
+	complete(&rv->done);
 }
 
 static void __wrmsr_safe_on_cpu(void *info)
@@ -161,17 +168,26 @@ static void __wrmsr_safe_on_cpu(void *info)
 
 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 {
+	struct msr_info_completion rv;
+	call_single_data_t csd = {
+		.func	= __rdmsr_safe_on_cpu,
+		.info	= &rv,
+	};
 	int err;
-	struct msr_info rv;
 
 	memset(&rv, 0, sizeof(rv));
+	init_completion(&rv.done);
+	rv.msr.msr_no = msr_no;
 
-	rv.msr_no = msr_no;
-	err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
-	*l = rv.reg.l;
-	*h = rv.reg.h;
+	err = smp_call_function_single_async(cpu, &csd);
+	if (!err) {
+		wait_for_completion(&rv.done);
+		err = rv.msr.err;
+	}
+	*l = rv.msr.reg.l;
+	*h = rv.msr.reg.h;
 
-	return err ? err : rv.err;
+	return err;
 }
 EXPORT_SYMBOL(rdmsr_safe_on_cpu);
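For context, callers of rdmsr_safe_on_cpu(), such as msr_read() mentioned in
the changelog, run in preemptible process context, which is what makes
sleeping on the completion legal. A hypothetical caller, shown only to
illustrate the calling convention (the helper name and MSR choice are
illustrative, not part of this commit):

#include <linux/kernel.h>
#include <asm/msr.h>	/* rdmsr_safe_on_cpu(), MSR_IA32_TSC */

/*
 * Hypothetical helper: read IA32_TSC on another CPU from preemptible
 * process context. With this commit the call sleeps on a completion while
 * the remote CPU services the IPI, instead of busy-polling in
 * smp_call_function_single().
 */
static u64 read_remote_tsc(unsigned int cpu)
{
	u32 lo, hi;
	int err;

	err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TSC, &lo, &hi);
	if (err)
		return 0;	/* target CPU offline or MSR read faulted */

	return ((u64)hi << 32) | lo;
}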