linux/arch/arm/kernel/sched_clock.c
Linus Torvalds 1bf25e78af ARM: arm-soc: late cleanups

Merge tag 'cleanup-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc

Pull ARM SoC late cleanups from Arnd Bergmann:
 "These are cleanups and smaller changes that either depend on earlier
  feature branches or came in late during the development cycle.  We
  normally try to get all cleanups early, so these are the exceptions:

   - A follow-up on the clocksource reworks, hopefully the last time we
     need to merge clocksource subsystem changes through arm-soc.

     A first set of patches was part of the original 3.10 arm-soc
     cleanup series because of interdependencies with timer drivers now
     moved out of arch/arm.

   - Migrating the SPEAr13xx platform away from using auxdata for DMA
     channel descriptions towards using information in device tree,
     based on the earlier SPEAr multiplatform series

   - A few follow-ups on the Atmel SAMA5 support and other changes for
     Atmel at91 based on the larger at91 reworks.

   - Moving the armada irqchip implementation to drivers/irqchip

   - Several OMAP cleanups following up on the larger series already
     merged in 3.10."

* tag 'cleanup-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (50 commits)
  ARM: OMAP4: change the device names in usb_bind_phy
  ARM: OMAP2+: Fix mismerge for timer.c between ff931c82 and da4a686a
  ARM: SPEAr: conditionalize SMP code
  ARM: arch_timer: Silence debug preempt warnings
  ARM: OMAP: remove unused variable
  serial: amba-pl011: fix !CONFIG_DMA_ENGINE case
  ata: arasan: remove the need for platform_data
  ARM: at91/sama5d34ek.dts: remove not needed compatibility string
  ARM: at91: dts: add MCI DMA support
  ARM: at91: dts: add i2c dma support
  ARM: at91: dts: set #dma-cells to the correct value
  ARM: at91: suspend both memory controllers on at91sam9263
  irqchip: armada-370-xp: slightly cleanup irq controller driver
  irqchip: armada-370-xp: move IRQ handler to avoid forward declaration
  irqchip: move IRQ driver for Armada 370/XP
  ARM: mvebu: move L2 cache initialization in init_early()
  devtree: add binding documentation for sp804
  ARM: integrator-cp: convert use CLKSRC_OF for timer init
  ARM: versatile: use OF init for sp804 timer
  ARM: versatile: add versatile dtbs to dtbs target
  ...
2013-05-07 11:22:14 -07:00


/*
 * sched_clock.c: support for extending counters to full 64-bit ns counter
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/timer.h>
#include <asm/sched_clock.h>
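
/*
 * State for extending the raw counter into a 64-bit nanosecond clock:
 * epoch_cyc and epoch_ns record the counter value and the nanosecond time
 * at the last epoch update, epoch_cyc_copy acts as a sequence marker so
 * readers can detect a concurrent update, and mult/shift are the
 * fixed-point factors that convert counter ticks to nanoseconds.
 */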
struct clock_data {
	u64 epoch_ns;
	u32 epoch_cyc;
	u32 epoch_cyc_copy;
	unsigned long rate;
	u32 mult;
	u32 shift;
	bool suspended;
	bool needs_suspend;
};

static void sched_clock_poll(unsigned long wrap_ticks);
static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);

static int irqtime = -1;
core_param(irqtime, irqtime, int, 0400);

static struct clock_data cd = {
	.mult	= NSEC_PER_SEC / HZ,
};

static u32 __read_mostly sched_clock_mask = 0xffffffff;

static u32 notrace jiffy_sched_clock_read(void)
{
	return (u32)(jiffies - INITIAL_JIFFIES);
}

static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;

static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}
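
/*
 * The conversion is plain fixed-point arithmetic: ns = (cyc * mult) >> shift,
 * with mult and shift chosen by clocks_calc_mult_shift() in
 * setup_sched_clock().  As an illustration (the exact pair picked may
 * differ), a 1 MHz counter could use mult = 1000 << 22 and shift = 22,
 * so that
 *
 *	(cyc * (1000 << 22)) >> 22 == cyc * 1000
 *
 * i.e. exactly 1000 ns per tick.
 */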

static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
{
	u64 epoch_ns;
	u32 epoch_cyc;

	if (cd.suspended)
		return cd.epoch_ns;

	/*
	 * Load the epoch_cyc and epoch_ns atomically. We do this by
	 * ensuring that we always write epoch_cyc, epoch_ns and
	 * epoch_cyc_copy in strict order, and read them in strict order.
	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
	 * the middle of an update, and we should repeat the load.
	 */
	do {
		epoch_cyc = cd.epoch_cyc;
		smp_rmb();
		epoch_ns = cd.epoch_ns;
		smp_rmb();
	} while (epoch_cyc != cd.epoch_cyc_copy);

	return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift);
}

/*
 * Atomically update the sched_clock epoch.
 */
static void notrace update_sched_clock(void)
{
	unsigned long flags;
	u32 cyc;
	u64 ns;

	cyc = read_sched_clock();
	ns = cd.epoch_ns +
		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
			  cd.mult, cd.shift);
	/*
	 * Write epoch_cyc and epoch_ns in a way that the update is
	 * detectable in cyc_to_sched_clock().
	 */
	raw_local_irq_save(flags);
	cd.epoch_cyc_copy = cyc;
	smp_wmb();
	cd.epoch_ns = ns;
	smp_wmb();
	cd.epoch_cyc = cyc;
	raw_local_irq_restore(flags);
}
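
/*
 * Timer callback: re-arm ourselves and refresh the epoch.  The period is
 * programmed in setup_sched_clock() to roughly 90% of the counter wrap
 * time, so the epoch is always updated before the hardware counter can
 * wrap around unnoticed.
 */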
static void sched_clock_poll(unsigned long wrap_ticks)
{
	mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks));
	update_sched_clock();
}

void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
	unsigned long r, w;
	u64 res, wrap;
	char r_unit;

	if (cd.rate > rate)
		return;

	BUG_ON(bits > 32);
	WARN_ON(!irqs_disabled());
	read_sched_clock = read;
	sched_clock_mask = (1 << bits) - 1;
	cd.rate = rate;

	/* calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0);

	r = rate;
	if (r >= 4000000) {
		r /= 1000000;
		r_unit = 'M';
	} else if (r >= 1000) {
		r /= 1000;
		r_unit = 'k';
	} else
		r_unit = ' ';

	/* calculate how many ns until we wrap */
	wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift);
	do_div(wrap, NSEC_PER_MSEC);
	w = wrap;

	/* calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, cd.mult, cd.shift);
	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
		bits, r, r_unit, res, w);

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * set the initial epoch.
	 */
	sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
	update_sched_clock();

	/*
	 * Ensure that sched_clock() starts off at 0ns.
	 */
	cd.epoch_ns = 0;

	/* Enable IRQ time accounting if we have a fast enough sched_clock */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	pr_debug("Registered %pF as sched_clock source\n", read);
}
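
/*
 * Typical use (illustrative only; the names below are hypothetical): a
 * platform timer driver registers a read callback for its free-running
 * counter together with the counter's width and rate, e.g. for a 32-bit
 * counter running at 24 MHz:
 *
 *	static u32 notrace my_counter_read(void)
 *	{
 *		return readl_relaxed(my_counter_base + MY_COUNTER_VALUE);
 *	}
 *
 *	setup_sched_clock(my_counter_read, 32, 24000000);
 *
 * From then on sched_clock() returns nanoseconds derived from that counter.
 */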

static unsigned long long notrace sched_clock_32(void)
{
	u32 cyc = read_sched_clock();
	return cyc_to_sched_clock(cyc, sched_clock_mask);
}

unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32;

unsigned long long notrace sched_clock(void)
{
	return sched_clock_func();
}
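
/*
 * sched_clock() above overrides the weak, jiffies-based default in the
 * core scheduler code.  The indirection through sched_clock_func lets a
 * timer driver with a wider (e.g. 64-bit) counter install its own
 * implementation in place of the 32-bit extension scheme used here.
 */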

void __init sched_clock_postinit(void)
{
	/*
	 * If no sched_clock function has been provided at that point,
	 * make it the final one.
	 */
	if (read_sched_clock == jiffy_sched_clock_read)
		setup_sched_clock(jiffy_sched_clock_read, 32, HZ);

	sched_clock_poll(sched_clock_timer.data);
}
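
/*
 * Across suspend, sched_clock() is frozen: the suspend hook takes one
 * final epoch update and sets cd.suspended, so readers keep returning
 * that last epoch_ns value.  On resume the cycle epoch is re-based to the
 * current counter reading, so the clock does not jump forward by however
 * long the hardware counter ran (or was reset) while we were asleep.
 */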
static int sched_clock_suspend(void)
{
	sched_clock_poll(sched_clock_timer.data);
	cd.suspended = true;
	return 0;
}

static void sched_clock_resume(void)
{
	cd.epoch_cyc = read_sched_clock();
	cd.epoch_cyc_copy = cd.epoch_cyc;
	cd.suspended = false;
}

static struct syscore_ops sched_clock_ops = {
	.suspend = sched_clock_suspend,
	.resume = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);
	return 0;
}

device_initcall(sched_clock_syscore_init);