[PATCH] Define __raw_get_cpu_var and use it
There are several instances of per_cpu(foo, raw_smp_processor_id()), which is semantically equivalent to __get_cpu_var(foo) but without the warning that smp_processor_id() can give if CONFIG_DEBUG_PREEMPT is enabled.

For those architectures with optimized per-cpu implementations, namely ia64, powerpc, s390, sparc64 and x86_64, per_cpu() turns into more and slower code than __get_cpu_var(), so it would be preferable to use __get_cpu_var on those platforms.

This defines a __raw_get_cpu_var(x) macro which turns into per_cpu(x, raw_smp_processor_id()) on architectures that use the generic per-cpu implementation, and turns into __get_cpu_var(x) on the architectures that have an optimized per-cpu implementation.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit bfe5d83419 (parent 6ceab8a936)
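As a usage illustration, and not part of the patch itself: a minimal sketch of the call-site pattern being converted, using a hypothetical per-CPU variable "foo" that appears nowhere in this patch.

	DEFINE_PER_CPU(int, foo);

	static void example(void)
	{
		/* Before: open-coded.  raw_smp_processor_id() suppresses the
		 * CONFIG_DEBUG_PREEMPT warning, but per_cpu() always pays for
		 * the by-CPU offset lookup, even on architectures where the
		 * local offset is already at hand. */
		per_cpu(foo, raw_smp_processor_id())++;

		/* After: same semantics; expands to the cheaper
		 * __get_cpu_var() form on ia64, powerpc, s390, sparc64 and
		 * x86_64, and back to per_cpu(foo, raw_smp_processor_id())
		 * on architectures using the generic implementation. */
		__raw_get_cpu_var(foo)++;
	}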
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -14,6 +14,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
 #define __get_cpu_var(var) per_cpu(var, smp_processor_id())
+#define __raw_get_cpu_var(var) per_cpu(var, raw_smp_processor_id())

 /* A macro to avoid #include hell... */
 #define percpu_modcopy(pcpudst, src, size) \
@@ -30,6 +31,7 @@ do { \

 #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var) per_cpu__##var
+#define __raw_get_cpu_var(var) per_cpu__##var

 #endif /* SMP */

--- a/include/asm-ia64/percpu.h
+++ b/include/asm-ia64/percpu.h
@@ -42,6 +42,7 @@ DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);

 #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
 #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
+#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))

 extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size);
 extern void setup_per_cpu_areas (void);
@@ -51,6 +52,7 @@ extern void *per_cpu_init(void);

 #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var) per_cpu__##var
+#define __raw_get_cpu_var(var) per_cpu__##var
 #define per_cpu_init() (__phys_per_cpu_start)

 #endif /* SMP */
--- a/include/asm-powerpc/percpu.h
+++ b/include/asm-powerpc/percpu.h
@@ -22,6 +22,7 @@
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
 #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
+#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))

 /* A macro to avoid #include hell... */
 #define percpu_modcopy(pcpudst, src, size) \
@@ -41,6 +42,7 @@ extern void setup_per_cpu_areas(void);

 #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var) per_cpu__##var
+#define __raw_get_cpu_var(var) per_cpu__##var

 #endif /* SMP */

--- a/include/asm-s390/percpu.h
+++ b/include/asm-s390/percpu.h
@@ -40,6 +40,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
     __typeof__(type) per_cpu__##name

 #define __get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset)
+#define __raw_get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset)
 #define per_cpu(var,cpu) __reloc_hide(var,__per_cpu_offset[cpu])

 /* A macro to avoid #include hell... */
@@ -57,6 +58,7 @@ do { \
     __typeof__(type) per_cpu__##name

 #define __get_cpu_var(var) __reloc_hide(var,0)
+#define __raw_get_cpu_var(var) __reloc_hide(var,0)
 #define per_cpu(var,cpu) __reloc_hide(var,0)

 #endif /* SMP */
--- a/include/asm-sparc64/percpu.h
+++ b/include/asm-sparc64/percpu.h
@@ -21,6 +21,7 @@ register unsigned long __local_per_cpu_offset asm("g5");
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
 #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __local_per_cpu_offset))
+#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __local_per_cpu_offset))

 /* A macro to avoid #include hell... */
 #define percpu_modcopy(pcpudst, src, size) \
@@ -37,6 +38,7 @@ do { \

 #define per_cpu(var, cpu) (*((void)cpu, &per_cpu__##var))
 #define __get_cpu_var(var) per_cpu__##var
+#define __raw_get_cpu_var(var) per_cpu__##var

 #endif /* SMP */

--- a/include/asm-x86_64/percpu.h
+++ b/include/asm-x86_64/percpu.h
@@ -21,6 +21,7 @@
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
 #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
+#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))

 /* A macro to avoid #include hell... */
 #define percpu_modcopy(pcpudst, src, size) \
@@ -40,6 +41,7 @@ extern void setup_per_cpu_areas(void);

 #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var) per_cpu__##var
+#define __raw_get_cpu_var(var) per_cpu__##var

 #endif /* SMP */

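Aside (not from the patch): a standalone userspace toy contrasting the two expansions above. Every name here merely mimics its kernel counterpart and the offsets are made up. The point is that the generic flavour must fetch the CPU number and index the offset table, while an optimized architecture keeps the local offset at hand (sparc64 pins it in register g5, s390 reads it from the lowcore), so both arrive at the same address but the optimized path is shorter.

	#include <stdio.h>

	#define NR_CPUS 4

	/* Stand-in for the generic implementation's offset table. */
	static unsigned long __per_cpu_offset[NR_CPUS] = {
		0x0, 0x1000, 0x2000, 0x3000
	};

	/* Stand-in for an arch-cached local offset (e.g. sparc64's %g5). */
	static unsigned long __local_per_cpu_offset = 0x2000;

	static int raw_smp_processor_id(void) { return 2; }	/* pretend CPU 2 */

	static int per_cpu__foo;	/* stand-in for DEFINE_PER_CPU(int, foo) */

	int main(void)
	{
		/* Generic flavour: fetch the CPU number, index the table. */
		unsigned long generic = (unsigned long)&per_cpu__foo +
				__per_cpu_offset[raw_smp_processor_id()];

		/* Optimized flavour: the offset is already at hand. */
		unsigned long optimized = (unsigned long)&per_cpu__foo +
				__local_per_cpu_offset;

		printf("same address either way: %#lx == %#lx\n",
		       generic, optimized);
		return 0;
	}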
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -576,7 +576,7 @@ void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,

 	memset(timer, 0, sizeof(struct hrtimer));

-	bases = per_cpu(hrtimer_bases, raw_smp_processor_id());
+	bases = __raw_get_cpu_var(hrtimer_bases);

 	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_ABS)
 		clock_id = CLOCK_MONOTONIC;
@@ -599,7 +599,7 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
 {
 	struct hrtimer_base *bases;

-	bases = per_cpu(hrtimer_bases, raw_smp_processor_id());
+	bases = __raw_get_cpu_var(hrtimer_bases);
 	*tp = ktime_to_timespec(bases[which_clock].resolution);

 	return 0;
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4152,7 +4152,7 @@ EXPORT_SYMBOL(yield);
  */
 void __sched io_schedule(void)
 {
-	struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
+	struct runqueue *rq = &__raw_get_cpu_var(runqueues);

 	atomic_inc(&rq->nr_iowait);
 	schedule();
@@ -4163,7 +4163,7 @@ EXPORT_SYMBOL(io_schedule);

 long __sched io_schedule_timeout(long timeout)
 {
-	struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
+	struct runqueue *rq = &__raw_get_cpu_var(runqueues);
 	long ret;

 	atomic_inc(&rq->nr_iowait);
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -36,7 +36,7 @@ static struct notifier_block panic_block = {

 void touch_softlockup_watchdog(void)
 {
-	per_cpu(touch_timestamp, raw_smp_processor_id()) = jiffies;
+	__raw_get_cpu_var(touch_timestamp) = jiffies;
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);

--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -146,7 +146,7 @@ static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
 void fastcall init_timer(struct timer_list *timer)
 {
 	timer->entry.next = NULL;
-	timer->base = per_cpu(tvec_bases, raw_smp_processor_id());
+	timer->base = __raw_get_cpu_var(tvec_bases);
 }
 EXPORT_SYMBOL(init_timer);

--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -244,7 +244,7 @@ static unsigned int rt_hash_rnd;

 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
 #define RT_CACHE_STAT_INC(field) \
-	(per_cpu(rt_cache_stat, raw_smp_processor_id()).field++)
+	(__raw_get_cpu_var(rt_cache_stat).field++)

 static int rt_intern_hash(unsigned hash, struct rtable *rth,
 			  struct rtable **res);
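A closing note, my reading of the call sites above rather than a claim made by the patch: __raw_get_cpu_var(), like raw_smp_processor_id(), is only appropriate where landing on a stale CPU's copy is harmless, as in loose statistics such as RT_CACHE_STAT_INC above. A hypothetical counter in the same spirit:

	DEFINE_PER_CPU(unsigned long, hits);	/* hypothetical counter */

	static void note_hit(void)
	{
		/* Preemption may move this task to another CPU at any point,
		 * so the increment may land on the "wrong" CPU's counter and
		 * is not atomic; for loose statistics that is acceptable,
		 * which is why no DEBUG_PREEMPT warning is wanted here. */
		__raw_get_cpu_var(hits)++;
	}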