Convert alpha to use clocksources instead of arch_gettimeoffset

Alpha has a TSC-like rpcc counter that it uses to manage time.
This can be converted to an actual clocksource instead of using
the arch_gettimeoffset method, which is really only there for legacy
systems with no continuous counter.
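
(For reference, once the rpcc clocksource is registered, the generic
timekeeping core turns raw counter deltas into nanoseconds using the
clocksource's mult/shift pair, which clocksource_calc_mult_shift()
derives from the counter frequency. A rough sketch of what that
conversion boils down to; cycles_to_ns is a hypothetical helper name,
not part of this patch:

	/* Illustrative only: (cycles * mult) >> shift is the core's
	   cycle-to-nanosecond conversion for a registered clocksource. */
	static inline u64 cycles_to_ns(u64 delta_cycles, u32 mult, u32 shift)
	{
		return (delta_cycles * mult) >> shift;
	})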

Further cleanups could be made if alpha were converted to the
clockevents model.
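
(A minimal sketch, not part of this patch, of what such a clockevents
conversion could look like; the names alpha_ce, alpha_ce_set_mode and
alpha_init_clockevent below are purely illustrative:

	#include <linux/clockevents.h>

	/* Illustrative only: a periodic clock_event_device driven by the
	   existing timer interrupt, registered with the generic tick code. */
	static void alpha_ce_set_mode(enum clock_event_mode mode,
				      struct clock_event_device *ce)
	{
		/* start or stop the periodic timer interrupt here */
	}

	static struct clock_event_device alpha_ce = {
		.name		= "alpha-timer",
		.features	= CLOCK_EVT_FEAT_PERIODIC,
		.rating		= 100,
		.set_mode	= alpha_ce_set_mode,
	};

	static void __init alpha_init_clockevent(void)
	{
		alpha_ce.cpumask = cpumask_of(smp_processor_id());
		clockevents_register_device(&alpha_ce);
	})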

CC: Thomas Gleixner <tglx@linutronix.de>
CC: Richard Henderson <rth@twiddle.net>
Acked-by: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Tested-by: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Signed-off-by: Matt Turner <mattst88@gmail.com>
Signed-off-by: John Stultz <johnstul@us.ibm.com>
commit 9ce34c8f44 (parent ec96e2fe95)
Authored by John Stultz on 2010-03-19 12:23:57 -04:00; committed by Matt Turner.
2 files changed, 31 insertions(+), 42 deletions(-)

arch/alpha/Kconfig

@@ -51,10 +51,6 @@ config GENERIC_TIME
 	bool
 	default y
 
-config ARCH_USES_GETTIMEOFFSET
-	bool
-	default y
-
 config GENERIC_CMOS_UPDATE
 	def_bool y

arch/alpha/kernel/time.c

@@ -51,6 +51,7 @@
 #include <linux/mc146818rtc.h>
 #include <linux/time.h>
 #include <linux/timex.h>
+#include <linux/clocksource.h>
 
 #include "proto.h"
 #include "irq_impl.h"
@@ -332,6 +333,34 @@ rpcc_after_update_in_progress(void)
 	return rpcc();
 }
 
+#ifndef CONFIG_SMP
+/* Until and unless we figure out how to get cpu cycle counters
+   in sync and keep them there, we can't use the rpcc.  */
+static cycle_t read_rpcc(struct clocksource *cs)
+{
+	cycle_t ret = (cycle_t)rpcc();
+	return ret;
+}
+
+static struct clocksource clocksource_rpcc = {
+	.name		= "rpcc",
+	.rating		= 300,
+	.read		= read_rpcc,
+	.mask		= CLOCKSOURCE_MASK(32),
+	.flags		= CLOCK_SOURCE_IS_CONTINUOUS
+};
+
+static inline void register_rpcc_clocksource(long cycle_freq)
+{
+	clocksource_calc_mult_shift(&clocksource_rpcc, cycle_freq, 4);
+	clocksource_register(&clocksource_rpcc);
+}
+#else /* !CONFIG_SMP */
+static inline void register_rpcc_clocksource(long cycle_freq)
+{
+}
+#endif /* !CONFIG_SMP */
+
 void __init
 time_init(void)
 {
@@ -385,6 +414,8 @@ time_init(void)
 		__you_loose();
 	}
 
+	register_rpcc_clocksource(cycle_freq);
+
 	state.last_time = cc1;
 	state.scaled_ticks_per_cycle
 		= ((unsigned long) HZ << FIX_SHIFT) / cycle_freq;
@@ -394,44 +425,6 @@ time_init(void)
 	alpha_mv.init_rtc();
 }
 
-/*
- * Use the cycle counter to estimate an displacement from the last time
- * tick.  Unfortunately the Alpha designers made only the low 32-bits of
- * the cycle counter active, so we overflow on 8.2 seconds on a 500MHz
- * part.  So we can't do the "find absolute time in terms of cycles" thing
- * that the other ports do.
- */
-u32 arch_gettimeoffset(void)
-{
-#ifdef CONFIG_SMP
-	/* Until and unless we figure out how to get cpu cycle counters
-	   in sync and keep them there, we can't use the rpcc tricks.  */
-	return 0;
-#else
-	unsigned long delta_cycles, delta_usec, partial_tick;
-
-	delta_cycles = rpcc() - state.last_time;
-	partial_tick = state.partial_tick;
-	/*
-	 * usec = cycles * ticks_per_cycle * 2**48 * 1e6 / (2**48 * ticks)
-	 *	= cycles * (s_t_p_c) * 1e6 / (2**48 * ticks)
-	 *	= cycles * (s_t_p_c) * 15625 / (2**42 * ticks)
-	 *
-	 * which, given a 600MHz cycle and a 1024Hz tick, has a
-	 * dynamic range of about 1.7e17, which is less than the
-	 * 1.8e19 in an unsigned long, so we are safe from overflow.
-	 *
-	 * Round, but with .5 up always, since .5 to even is harder
-	 * with no clear gain.
-	 */
-	delta_usec = (delta_cycles * state.scaled_ticks_per_cycle
-		      + partial_tick) * 15625;
-	delta_usec = ((delta_usec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2;
-	return delta_usec * 1000;
-#endif
-}
-
 /*
  * In order to set the CMOS clock precisely, set_rtc_mmss has to be
  * called 500 ms after the second nowtime has started, because when