commit a7f4255f90 (mirror of https://github.com/torvalds/linux.git)
Commit f0fbf0abc0 ("x86: integrate delay functions") converted
delay_tsc() into a random delay generator on 64-bit. The reason is
that it merged the mostly identical versions of delay_32.c and
delay_64.c, but the merge introduced one subtle difference in the
result:
 static void delay_tsc(unsigned long loops)
 {
-	unsigned bclock, now;
+	unsigned long bclock, now;
Now the function uses rdtscl(), which returns the lower 32 bits of
the TSC. On 32-bit that is not a problem, since unsigned long is 32
bits wide there. On 64-bit it fails whenever the lower 32 bits are
about to wrap around at the time bclock is read, because the
following check
	if ((now - bclock) >= loops)
		break;
evaluates to true on 64-bit for e.g. bclock = 0xffffffff and now = 0,
because the unsigned long subtraction (now - bclock) of these values
results in 0xffffffff00000001, which is definitely larger than the
loops value. That explains Tvrtko's observation:

  "Because I am seeing udelay(500) (_occasionally_) being short, and
   that by delaying for some duration between 0us (yep) and 491us."
Make those variables explicitly u32 again, so the subtraction wraps
modulo 2^32 and the check works correctly on both 32-bit and 64-bit.
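
To make the failure mode concrete, here is a minimal userspace sketch
(illustration only, not part of the patch) contrasting the 64-bit
unsigned long subtraction with the u32 subtraction at the wrap point:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t bclock64 = 0xffffffff, now64 = 0; /* TSC low word just wrapped */
		uint32_t bclock32 = 0xffffffff, now32 = 0;
		unsigned long loops = 500;

		/* 64-bit: 0 - 0xffffffff wraps modulo 2^64 -> huge value */
		printf("u64: now - bclock = %#llx (>= loops: %d)\n",
		       (unsigned long long)(now64 - bclock64),
		       (now64 - bclock64) >= loops);

		/* u32: wraps modulo 2^32 -> the true elapsed tick count */
		printf("u32: now - bclock = %#x (>= loops: %d)\n",
		       now32 - bclock32,
		       (unsigned long)(now32 - bclock32) >= loops);
		return 0;
	}

With u32 operands the delta is 1 tick and the delay loop keeps
waiting; with 64-bit operands the delta is 0xffffffff00000001, the
exit check fires immediately, and udelay() returns almost at once,
which is exactly the short delay reported above.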
Reported-by: Tvrtko Ursulin <tvrtko.ursulin@onelan.co.uk>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable@vger.kernel.org # >= 2.6.27
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
The file after the fix (141 lines, 2.8 KiB of C):
/*
 * Precise Delay Loops for i386
 *
 * Copyright (C) 1993 Linus Torvalds
 * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 * Copyright (C) 2008 Jiri Hladky <hladky _dot_ jiri _at_ gmail _dot_ com>
 *
 * The __delay function must _NOT_ be inlined as its execution time
 * depends wildly on alignment on many x86 processors. The additional
 * jump magic is needed to get the timing stable on all the CPU's
 * we have to worry about.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timex.h>
#include <linux/preempt.h>
#include <linux/delay.h>
#include <linux/init.h>

#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/timer.h>

#ifdef CONFIG_SMP
# include <asm/smp.h>
#endif

/* simple loop based delay: */
static void delay_loop(unsigned long loops)
{
	asm volatile(
		"	test %0,%0	\n"
		"	jz 3f		\n"
		"	jmp 1f		\n"

		".align 16		\n"
		"1:	jmp 2f		\n"

		".align 16		\n"
		"2:	dec %0		\n"
		"	jnz 2b		\n"
		"3:	dec %0		\n"

		: /* we don't need output */
		:"a" (loops)
	);
}

/* TSC based delay: */
static void delay_tsc(unsigned long __loops)
{
	/* u32 on purpose: (now - bclock) must wrap modulo 2^32 */
	u32 bclock, now, loops = __loops;
	int cpu;

	preempt_disable();
	cpu = smp_processor_id();
	rdtsc_barrier();
	rdtscl(bclock);
	for (;;) {
		rdtsc_barrier();
		rdtscl(now);
		if ((now - bclock) >= loops)
			break;

		/* Allow RT tasks to run */
		preempt_enable();
		rep_nop();
		preempt_disable();

		/*
		 * It is possible that we moved to another CPU, and
		 * since TSC's are per-cpu we need to calculate
		 * that. The delay must guarantee that we wait "at
		 * least" the amount of time. Being moved to another
		 * CPU could make the wait longer but we just need to
		 * make sure we waited long enough. Rebalance the
		 * counter for this CPU.
		 */
		if (unlikely(cpu != smp_processor_id())) {
			loops -= (now - bclock);
			cpu = smp_processor_id();
			rdtsc_barrier();
			rdtscl(bclock);
		}
	}
	preempt_enable();
}

/*
 * Since we calibrate only once at boot, this
 * function should be set once at boot and not changed
 */
static void (*delay_fn)(unsigned long) = delay_loop;

void use_tsc_delay(void)
{
	delay_fn = delay_tsc;
}

int __devinit read_current_timer(unsigned long *timer_val)
{
	if (delay_fn == delay_tsc) {
		rdtscll(*timer_val);
		return 0;
	}
	return -1;
}

void __delay(unsigned long loops)
{
	delay_fn(loops);
}
EXPORT_SYMBOL(__delay);

inline void __const_udelay(unsigned long xloops)
{
	int d0;

	xloops *= 4;
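	/*
	 * mull %%edx computes EDX:EAX = EAX * EDX (unsigned 32x32 -> 64).
	 * EAX holds the quadrupled xloops and EDX holds
	 * loops_per_jiffy * (HZ/4), so the product equals the original
	 * xloops * loops_per_jiffy * HZ. Keeping only EDX ("=d") retains
	 * the high 32 bits, i.e. (xloops * lpj * HZ) >> 32. The *4 and
	 * HZ/4 split keeps loops_per_jiffy * (HZ/4) within 32 bits.
	 */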
	asm("mull %%edx"
		:"=d" (xloops), "=&a" (d0)
		:"1" (xloops), "0"
		(this_cpu_read(cpu_info.loops_per_jiffy) * (HZ/4)));

	__delay(++xloops);
}
EXPORT_SYMBOL(__const_udelay);

void __udelay(unsigned long usecs)
{
	__const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}
EXPORT_SYMBOL(__udelay);

void __ndelay(unsigned long nsecs)
{
	__const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
}
EXPORT_SYMBOL(__ndelay);
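
For reference, the __udelay()/__const_udelay() pair is a fixed-point
conversion from microseconds to busy-loop iterations: usecs is scaled
by 0x10c7 (roughly 2^32 / 10^6), multiplied by loops_per_jiffy * HZ
via the mull trick above, and the high 32 bits are kept. A minimal
userspace sketch of the same arithmetic, with hypothetical calibration
values (HZ and LPJ below are illustrative, not taken from this file):

	#include <stdio.h>
	#include <stdint.h>

	#define HZ  1000ULL     /* hypothetical tick rate */
	#define LPJ 4000000ULL  /* hypothetical loops_per_jiffy */

	/* Same math as __udelay() -> __const_udelay(), in plain 64-bit C */
	static unsigned long udelay_loops(unsigned long usecs)
	{
		uint64_t xloops = (uint64_t)usecs * 0x10c7; /* usecs * 2^32 / 10^6 */
		/* (4 * xloops) * (LPJ * HZ/4) == xloops * LPJ * HZ */
		uint64_t loops = (4 * xloops * (LPJ * (HZ / 4))) >> 32;
		return (unsigned long)loops + 1;  /* __delay(++xloops) rounds up */
	}

	int main(void)
	{
		/* ~2,000,000 loops for 500us at LPJ * HZ = 4e9 loops/sec */
		printf("udelay(500) -> %lu loops\n", udelay_loops(500));
		return 0;
	}

With these assumed values udelay_loops(500) yields 2,000,016 loops,
slightly more than the ideal 2,000,000, matching the "wait at least
the amount of time" guarantee noted in delay_tsc().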