Mirror of https://github.com/torvalds/linux.git
06b8e878a9
This moves the ability to scale cputime into generic code. This allows us
to fix the issue in kernel/timer.c (noticed by Balbir) where we could only
add an unscaled value to the scaled utime/stime.

This adds a cputime_to_scaled function. As before, the POWERPC version does
the scaling based on the last SPURR/PURR ratio calculated. The generic and
s390 (only other arch to implement asm/cputime.h) versions are both NOPs.

Also moves the SPURR and PURR snapshots closer.

Signed-off-by: Michael Neuling <mikey@neuling.org>
Cc: Jay Lan <jlan@engr.sgi.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
236 lines
5.4 KiB
C
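The commit message above describes the estimate that cputime_to_scaled() makes on powerpc: scale a raw timebase delta by the most recently observed SPURR/PURR (scaled-to-raw) delta ratio, falling back to the raw value when no ratio is available. The stand-alone C sketch below illustrates that calculation in isolation; the names and the main() driver are hypothetical and not part of the kernel, which instead keeps the snapshots in the per-CPU cputime_last_delta / cputime_scaled_last_delta variables declared in the header that follows.

/*
 * Hypothetical stand-alone illustration of the scaling estimate used by
 * cputime_to_scaled() in the header below; not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t cputime_t;

/* Stand-ins for the per-CPU snapshots updated by the accounting code. */
static cputime_t cputime_last_delta;        /* last raw (PURR/timebase) delta */
static cputime_t cputime_scaled_last_delta; /* last scaled (SPURR) delta */

static cputime_t example_cputime_to_scaled(cputime_t ct)
{
        /* Without a previous delta there is no ratio; return the raw value. */
        if (cputime_last_delta == 0)
                return ct;
        return ct * cputime_scaled_last_delta / cputime_last_delta;
}

int main(void)
{
        /* Last tick: the CPU ran at 75% of nominal speed (SPURR/PURR = 3/4). */
        cputime_last_delta = 400;
        cputime_scaled_last_delta = 300;

        cputime_t raw = 1000;
        printf("raw=%llu scaled=%llu\n", (unsigned long long)raw,
               (unsigned long long)example_cputime_to_scaled(raw));
        /* prints: raw=1000 scaled=750 */
        return 0;
}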
/*
 * Definitions for measuring cputime on powerpc machines.
 *
 * Copyright (C) 2006 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in
 * the same units as the timebase.  Otherwise we measure cpu time
 * in jiffies using the generic definitions.
 */

#ifndef __POWERPC_CPUTIME_H
#define __POWERPC_CPUTIME_H

#ifndef CONFIG_VIRT_CPU_ACCOUNTING
#include <asm-generic/cputime.h>
#else

#include <linux/types.h>
#include <linux/time.h>
#include <asm/div64.h>
#include <asm/time.h>
#include <asm/param.h>

typedef u64 cputime_t;
typedef u64 cputime64_t;

#define cputime_zero                ((cputime_t)0)
#define cputime_max                 ((~((cputime_t)0) >> 1) - 1)
#define cputime_add(__a, __b)       ((__a) + (__b))
#define cputime_sub(__a, __b)       ((__a) - (__b))
#define cputime_div(__a, __n)       ((__a) / (__n))
#define cputime_halve(__a)          ((__a) >> 1)
#define cputime_eq(__a, __b)        ((__a) == (__b))
#define cputime_gt(__a, __b)        ((__a) > (__b))
#define cputime_ge(__a, __b)        ((__a) >= (__b))
#define cputime_lt(__a, __b)        ((__a) < (__b))
#define cputime_le(__a, __b)        ((__a) <= (__b))

#define cputime64_zero              ((cputime64_t)0)
#define cputime64_add(__a, __b)     ((__a) + (__b))
#define cputime64_sub(__a, __b)     ((__a) - (__b))
#define cputime_to_cputime64(__ct)  (__ct)

#ifdef __KERNEL__

/*
 * Convert cputime <-> jiffies
 */
extern u64 __cputime_jiffies_factor;
DECLARE_PER_CPU(unsigned long, cputime_last_delta);
DECLARE_PER_CPU(unsigned long, cputime_scaled_last_delta);

static inline unsigned long cputime_to_jiffies(const cputime_t ct)
{
        return mulhdu(ct, __cputime_jiffies_factor);
}

/* Estimate the scaled cputime by scaling the real cputime based on
 * the last scaled to real ratio */
static inline cputime_t cputime_to_scaled(const cputime_t ct)
{
        if (cpu_has_feature(CPU_FTR_SPURR) &&
            per_cpu(cputime_last_delta, smp_processor_id()))
                return ct * per_cpu(cputime_scaled_last_delta, smp_processor_id()) /
                            per_cpu(cputime_last_delta, smp_processor_id());
        return ct;
}

static inline cputime_t jiffies_to_cputime(const unsigned long jif)
{
        cputime_t ct;
        unsigned long sec;

        /* have to be a little careful about overflow */
        ct = jif % HZ;
        sec = jif / HZ;
        if (ct) {
                ct *= tb_ticks_per_sec;
                do_div(ct, HZ);
        }
        if (sec)
                ct += (cputime_t) sec * tb_ticks_per_sec;
        return ct;
}

static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
{
        cputime_t ct;
        u64 sec;

        /* have to be a little careful about overflow */
        ct = jif % HZ;
        sec = jif / HZ;
        if (ct) {
                ct *= tb_ticks_per_sec;
                do_div(ct, HZ);
        }
        if (sec)
                ct += (cputime_t) sec * tb_ticks_per_sec;
        return ct;
}

static inline u64 cputime64_to_jiffies64(const cputime_t ct)
{
        return mulhdu(ct, __cputime_jiffies_factor);
}

/*
 * Convert cputime <-> milliseconds
 */
extern u64 __cputime_msec_factor;

static inline unsigned long cputime_to_msecs(const cputime_t ct)
{
        return mulhdu(ct, __cputime_msec_factor);
}

static inline cputime_t msecs_to_cputime(const unsigned long ms)
{
        cputime_t ct;
        unsigned long sec;

        /* have to be a little careful about overflow */
        ct = ms % 1000;
        sec = ms / 1000;
        if (ct) {
                ct *= tb_ticks_per_sec;
                do_div(ct, 1000);
        }
        if (sec)
                ct += (cputime_t) sec * tb_ticks_per_sec;
        return ct;
}

/*
 * Convert cputime <-> seconds
 */
extern u64 __cputime_sec_factor;

static inline unsigned long cputime_to_secs(const cputime_t ct)
{
        return mulhdu(ct, __cputime_sec_factor);
}

static inline cputime_t secs_to_cputime(const unsigned long sec)
{
        return (cputime_t) sec * tb_ticks_per_sec;
}

/*
 * Convert cputime <-> timespec
 */
static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p)
{
        u64 x = ct;
        unsigned int frac;

        frac = do_div(x, tb_ticks_per_sec);
        p->tv_sec = x;
        x = (u64) frac * 1000000000;
        do_div(x, tb_ticks_per_sec);
        p->tv_nsec = x;
}

static inline cputime_t timespec_to_cputime(const struct timespec *p)
{
        cputime_t ct;

        ct = (u64) p->tv_nsec * tb_ticks_per_sec;
        do_div(ct, 1000000000);
        return ct + (u64) p->tv_sec * tb_ticks_per_sec;
}

/*
 * Convert cputime <-> timeval
 */
static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p)
{
        u64 x = ct;
        unsigned int frac;

        frac = do_div(x, tb_ticks_per_sec);
        p->tv_sec = x;
        x = (u64) frac * 1000000;
        do_div(x, tb_ticks_per_sec);
        p->tv_usec = x;
}

static inline cputime_t timeval_to_cputime(const struct timeval *p)
{
        cputime_t ct;

        ct = (u64) p->tv_usec * tb_ticks_per_sec;
        do_div(ct, 1000000);
        return ct + (u64) p->tv_sec * tb_ticks_per_sec;
}

/*
 * Convert cputime <-> clock_t (units of 1/USER_HZ seconds)
 */
extern u64 __cputime_clockt_factor;

static inline unsigned long cputime_to_clock_t(const cputime_t ct)
{
        return mulhdu(ct, __cputime_clockt_factor);
}

static inline cputime_t clock_t_to_cputime(const unsigned long clk)
{
        cputime_t ct;
        unsigned long sec;

        /* have to be a little careful about overflow */
        ct = clk % USER_HZ;
        sec = clk / USER_HZ;
        if (ct) {
                ct *= tb_ticks_per_sec;
                do_div(ct, USER_HZ);
        }
        if (sec)
                ct += (cputime_t) sec * tb_ticks_per_sec;
        return ct;
}

#define cputime64_to_clock_t(ct)    cputime_to_clock_t((cputime_t)(ct))

#endif /* __KERNEL__ */
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
#endif /* __POWERPC_CPUTIME_H */