/*
 *	Precise Delay Loops for x86-64
 *
 *	Copyright (C) 1993 Linus Torvalds
 *	Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 *	The __delay function must _NOT_ be inlined as its execution time
 *	depends wildly on alignment on many x86 processors.
 */
2006-06-26 11:59:44 +00:00
|
|
|
#include <linux/module.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <linux/sched.h>
|
2008-02-06 09:36:42 +00:00
|
|
|
#include <linux/timex.h>
|
2007-11-15 01:00:41 +00:00
|
|
|
#include <linux/preempt.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <linux/delay.h>
|
2008-02-06 09:36:42 +00:00
|
|
|
#include <linux/init.h>
|
2007-11-15 01:00:41 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <asm/delay.h>
|
2005-06-23 07:08:13 +00:00
|
|
|
#include <asm/msr.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
#include <asm/smp.h>
|
|
|
|
#endif
|
|
|
|
|
2008-02-06 09:36:42 +00:00
|
|
|
/*
 * read_current_timer - sample the raw TSC for timer calibration.
 * @timer_value: out-parameter that receives the current 64-bit TSC count.
 *
 * Always succeeds and returns 0; x86-64 is guaranteed to have a TSC.
 */
int __devinit read_current_timer(unsigned long *timer_value)
{
	rdtscll(*timer_value);

	return 0;
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
 * __delay - busy-wait until the TSC has advanced by @loops ticks.
 * @loops: number of TSC cycles to spin for.
 *
 * Spins reading the TSC until at least @loops ticks have elapsed.
 * Preemption is briefly re-enabled on every iteration so RT tasks can
 * run; if the task migrates to another CPU during that window, the
 * remaining count is re-based against the new CPU's TSC (TSCs are
 * per-CPU), preserving the "wait at least this long" guarantee.
 *
 * Must not be inlined: execution time depends wildly on alignment
 * (see the file header).
 */
void __delay(unsigned long loops)
{
	unsigned bclock, now;	/* 32-bit TSC snapshots; wraparound-safe via unsigned subtraction */
	int cpu;

	preempt_disable();	/* pin to this CPU while comparing its TSC */
	cpu = smp_processor_id();
	rdtscl(bclock);
	for (;;) {
		rdtscl(now);
		if ((now - bclock) >= loops)
			break;

		/* Allow RT tasks to run */
		preempt_enable();
		rep_nop();
		preempt_disable();

		/*
		 * It is possible that we moved to another CPU, and
		 * since TSC's are per-cpu we need to calculate
		 * that. The delay must guarantee that we wait "at
		 * least" the amount of time. Being moved to another
		 * CPU could make the wait longer but we just need to
		 * make sure we waited long enough. Rebalance the
		 * counter for this CPU.
		 */
		if (unlikely(cpu != smp_processor_id())) {
			loops -= (now - bclock);	/* credit time already waited */
			cpu = smp_processor_id();
			rdtscl(bclock);			/* restart count on the new CPU's TSC */
		}
	}
	preempt_enable();
}
EXPORT_SYMBOL(__delay);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
inline void __const_udelay(unsigned long xloops)
|
|
|
|
{
|
2007-10-19 18:35:04 +00:00
|
|
|
__delay(((xloops * HZ *
|
|
|
|
cpu_data(raw_smp_processor_id()).loops_per_jiffy) >> 32) + 1);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2006-06-26 11:59:44 +00:00
|
|
|
EXPORT_SYMBOL(__const_udelay);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/*
 * __udelay - busy-wait for a number of microseconds.
 * @usecs: microseconds to delay.
 *
 * Scales microseconds into the 2^-32 fixed-point units expected by
 * __const_udelay().
 */
void __udelay(unsigned long usecs)
{
	__const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}
EXPORT_SYMBOL(__udelay);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/*
 * __ndelay - busy-wait for a number of nanoseconds.
 * @nsecs: nanoseconds to delay.
 *
 * Scales nanoseconds into the 2^-32 fixed-point units expected by
 * __const_udelay().
 */
void __ndelay(unsigned long nsecs)
{
	__const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
}
EXPORT_SYMBOL(__ndelay);
|