commit 44259b1abf
vread_tsc is short and hot, and it's userspace code, so the usual
reasons to enable -pg and turn off sibling calls don't apply. (OK,
turning off sibling calls has no effect. But it might someday...)

As an added benefit, tsc.c is profilable now.

Signed-off-by: Andy Lutomirski <luto@mit.edu>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Borislav Petkov <bp@amd64.org>
Link: http://lkml.kernel.org/r/%3C99c6d7f5efa3ccb65b4ac6eb443e1ab7bad47d7b.1306156808.git.luto%40mit.edu%3E
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
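For context, dropping per-file instrumentation flags like this is normally
done in kbuild with a CFLAGS_REMOVE_<object> override. A minimal sketch of
the kind of Makefile change the message implies, assuming the function is
built as its own object (the name vread_tsc_64.o is illustrative):

        # vread_tsc_64.o is hot, userspace-visible code; don't let the
        # usual kernel profiling options pessimize it:
        CFLAGS_REMOVE_vread_tsc_64.o = -pg -fno-optimize-sibling-calls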
37 lines · 1007 B · C
/* This code runs in userspace. */

#define DISABLE_BRANCH_PROFILING

#include <asm/vgtod.h>

notrace cycle_t __vsyscall_fn vread_tsc(void)
{
        cycle_t ret;
        u64 last;

        /*
         * Empirically, a fence (of type that depends on the CPU)
         * before rdtsc is enough to ensure that rdtsc is ordered
         * with respect to loads.  The various CPU manuals are unclear
         * as to whether rdtsc can be reordered with later loads,
         * but no one has ever seen it happen.
         */
        rdtsc_barrier();
        ret = (cycle_t)vget_cycles();

        last = VVAR(vsyscall_gtod_data).clock.cycle_last;

        if (likely(ret >= last))
                return ret;

        /*
         * GCC likes to generate cmov here, but this branch is extremely
         * predictable (it's just a function of time and the likely is
         * very likely) and there's a data dependence, so force GCC
         * to generate a branch instead.  I don't barrier() because
         * we don't actually need a barrier, and if this function
         * ever gets inlined it will generate worse code.
         */
        asm volatile ("");
        return last;
}
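For readers outside the kernel tree, the same two tricks (a fence before
rdtsc so the read is ordered with respect to loads, and an empty asm to
discourage GCC from turning the clamp into cmov) can be sketched in plain
userspace C. This is an illustration only: the names read_tsc_ordered and
last_seen are made up, and unlike the kernel code above it clamps against
the last value it returned rather than against cycle_last from the
vsyscall data.

        #include <stdint.h>
        #include <stdio.h>
        #include <x86intrin.h>  /* _mm_lfence(), __rdtsc() */

        static uint64_t last_seen;

        static uint64_t read_tsc_ordered(void)
        {
                uint64_t now;

                _mm_lfence();   /* order rdtsc w.r.t. earlier loads */
                now = __rdtsc();

                if (now >= last_seen) {
                        last_seen = now;
                        return now;
                }

                /* same trick as above: an empty asm statement breaks the
                 * pattern GCC would otherwise compile to cmov, so the
                 * (highly predictable) branch is taken instead */
                asm volatile ("");
                return last_seen;
        }

        int main(void)
        {
                for (int i = 0; i < 3; i++)
                        printf("tsc: %llu\n",
                               (unsigned long long)read_tsc_ordered());
                return 0;
        }

Built with a plain "gcc -O2" on x86-64, this returns a value that never
goes backwards across calls, which is the monotonicity property the
kernel's clamp against cycle_last exists to preserve.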