/*
 * Written by: Matthew Dobson, IBM Corporation
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to <colpatch@us.ibm.com>
 */
|
2008-10-23 05:26:29 +00:00
|
|
|
#ifndef _ASM_X86_TOPOLOGY_H
|
|
|
|
#define _ASM_X86_TOPOLOGY_H
|
2008-01-30 12:30:38 +00:00
|
|
|
|
2009-12-15 01:58:23 +00:00
|
|
|
/*
|
|
|
|
* to preserve the visibility of NUMA_NO_NODE definition,
|
|
|
|
* moved to there from here. May be used independent of
|
|
|
|
* CONFIG_NUMA.
|
|
|
|
*/
|
|
|
|
#include <linux/numa.h>
|
|
|
|
|
2008-01-30 12:30:38 +00:00
|
|
|
#ifdef CONFIG_NUMA
|
|
|
|
#include <linux/cpumask.h>
|
2009-12-15 01:58:23 +00:00
|
|
|
|
2008-01-30 12:30:38 +00:00
|
|
|
#include <asm/mpspec.h>
|
2016-07-14 00:18:56 +00:00
|
|
|
#include <asm/percpu.h>
|
2008-01-30 12:30:38 +00:00
|
|
|
|
|
|
|
/* Mappings between logical cpu number and node number */
|
|
|
|
DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
|
|
|
|
|
|
|
|
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
|
2010-05-26 21:44:58 +00:00
|
|
|
/*
|
|
|
|
* override generic percpu implementation of cpu_to_node
|
|
|
|
*/
|
|
|
|
extern int __cpu_to_node(int cpu);
|
|
|
|
#define cpu_to_node __cpu_to_node
|
|
|
|
|
|
|
|
extern int early_cpu_to_node(int cpu);
|
2008-01-30 12:30:38 +00:00
|
|
|
|
|
|
|
#else /* !CONFIG_DEBUG_PER_CPU_MAPS */
|
|
|
|
|
|
|
|
/* Same function but used if called before per_cpu areas are setup */
|
|
|
|
static inline int early_cpu_to_node(int cpu)
|
|
|
|
{
|
2009-01-13 11:41:34 +00:00
|
|
|
return early_per_cpu(x86_cpu_to_node_map, cpu);
|
x86: cleanup early per cpu variables/accesses v4
* Introduce a new PER_CPU macro called "EARLY_PER_CPU". This is
used by some per_cpu variables that are initialized and accessed
before there are per_cpu areas allocated.
["Early" in respect to per_cpu variables is "earlier than the per_cpu
areas have been setup".]
This patchset adds these new macros:
DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)
EXPORT_EARLY_PER_CPU_SYMBOL(_name)
DECLARE_EARLY_PER_CPU(_type, _name)
early_per_cpu_ptr(_name)
early_per_cpu_map(_name, _idx)
early_per_cpu(_name, _cpu)
The DEFINE macro defines the per_cpu variable as well as the early
map and pointer. It also initializes the per_cpu variable and map
elements to "_initvalue". The early_* macros provide access to
the initial map (usually setup during system init) and the early
pointer. This pointer is initialized to point to the early map
but is then NULL'ed when the actual per_cpu areas are setup. After
that the per_cpu variable is the correct access to the variable.
The early_per_cpu() macro is not very efficient but does show how to
access the variable if you have a function that can be called both
"early" and "late". It tests the early ptr to be NULL, and if not
then it's still valid. Otherwise, the per_cpu variable is used
instead:
#define early_per_cpu(_name, _cpu) \
(early_per_cpu_ptr(_name) ? \
early_per_cpu_ptr(_name)[_cpu] : \
per_cpu(_name, _cpu))
A better method is to actually check the pointer manually. In the
case below, numa_set_node can be called both "early" and "late":
void __cpuinit numa_set_node(int cpu, int node)
{
int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
if (cpu_to_node_map)
cpu_to_node_map[cpu] = node;
else
per_cpu(x86_cpu_to_node_map, cpu) = node;
}
* Add a flag "arch_provides_topology_pointers" that indicates pointers
to topology cpumask_t maps are available. Otherwise, use the function
returning the cpumask_t value. This is useful if cpumask_t set size
is very large to avoid copying data on to/off of the stack.
* The coverage of CONFIG_DEBUG_PER_CPU_MAPS has been increased while
the non-debug case has been optimized a bit.
* Remove an unreferenced compiler warning in drivers/base/topology.c
* Clean up #ifdef in setup.c
For inclusion into sched-devel/latest tree.
Based on:
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
+ sched-devel/latest .../mingo/linux-2.6-sched-devel.git
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2008-05-12 19:21:12 +00:00
|
|
|
}
|
2008-01-30 12:33:21 +00:00
|
|
|
|
2009-03-13 04:19:52 +00:00
|
|
|
#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
|
|
|
|
|
|
|
|
/* Mappings between node number and cpus on that node. */
|
2009-03-13 04:19:53 +00:00
|
|
|
extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
|
2009-03-13 04:19:52 +00:00
|
|
|
|
|
|
|
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
|
2009-03-13 04:19:57 +00:00
|
|
|
extern const struct cpumask *cpumask_of_node(int node);
|
2009-03-13 04:19:52 +00:00
|
|
|
#else
|
|
|
|
/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
|
2009-03-13 04:19:53 +00:00
|
|
|
static inline const struct cpumask *cpumask_of_node(int node)
|
2008-01-30 12:30:38 +00:00
|
|
|
{
|
|
|
|
return node_to_cpumask_map[node];
|
|
|
|
}
|
2009-03-13 04:19:52 +00:00
|
|
|
#endif
|
|
|
|
|
2009-01-27 03:56:47 +00:00
|
|
|
extern void setup_node_to_cpumask_map(void);
|
|
|
|
|
2008-01-30 12:30:38 +00:00
|
|
|
#define pcibus_to_node(bus) __pcibus_to_node(bus)
|
|
|
|
|
|
|
|
extern int __node_distance(int, int);
|
|
|
|
#define node_distance(a, b) __node_distance(a, b)
|
|
|
|
|
x86: cleanup early per cpu variables/accesses v4
* Introduce a new PER_CPU macro called "EARLY_PER_CPU". This is
used by some per_cpu variables that are initialized and accessed
before there are per_cpu areas allocated.
["Early" in respect to per_cpu variables is "earlier than the per_cpu
areas have been setup".]
This patchset adds these new macros:
DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)
EXPORT_EARLY_PER_CPU_SYMBOL(_name)
DECLARE_EARLY_PER_CPU(_type, _name)
early_per_cpu_ptr(_name)
early_per_cpu_map(_name, _idx)
early_per_cpu(_name, _cpu)
The DEFINE macro defines the per_cpu variable as well as the early
map and pointer. It also initializes the per_cpu variable and map
elements to "_initvalue". The early_* macros provide access to
the initial map (usually setup during system init) and the early
pointer. This pointer is initialized to point to the early map
but is then NULL'ed when the actual per_cpu areas are setup. After
that the per_cpu variable is the correct access to the variable.
The early_per_cpu() macro is not very efficient but does show how to
access the variable if you have a function that can be called both
"early" and "late". It tests the early ptr to be NULL, and if not
then it's still valid. Otherwise, the per_cpu variable is used
instead:
#define early_per_cpu(_name, _cpu) \
(early_per_cpu_ptr(_name) ? \
early_per_cpu_ptr(_name)[_cpu] : \
per_cpu(_name, _cpu))
A better method is to actually check the pointer manually. In the
case below, numa_set_node can be called both "early" and "late":
void __cpuinit numa_set_node(int cpu, int node)
{
int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
if (cpu_to_node_map)
cpu_to_node_map[cpu] = node;
else
per_cpu(x86_cpu_to_node_map, cpu) = node;
}
* Add a flag "arch_provides_topology_pointers" that indicates pointers
to topology cpumask_t maps are available. Otherwise, use the function
returning the cpumask_t value. This is useful if cpumask_t set size
is very large to avoid copying data on to/off of the stack.
* The coverage of CONFIG_DEBUG_PER_CPU_MAPS has been increased while
the non-debug case has been optimized a bit.
* Remove an unreferenced compiler warning in drivers/base/topology.c
* Clean up #ifdef in setup.c
For inclusion into sched-devel/latest tree.
Based on:
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
+ sched-devel/latest .../mingo/linux-2.6-sched-devel.git
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2008-05-12 19:21:12 +00:00
|
|
|
#else /* !CONFIG_NUMA */
|
|
|
|
|
2009-01-15 17:19:32 +00:00
|
|
|
/* !CONFIG_NUMA: the whole system is a single node, node 0. */
static inline int numa_node_id(void)
{
	return 0;
}
|
2010-05-26 21:44:56 +00:00
|
|
|
/*
|
|
|
|
* indicate override:
|
|
|
|
*/
|
|
|
|
#define numa_node_id numa_node_id
|
2009-01-15 17:19:32 +00:00
|
|
|
|
|
|
|
/* !CONFIG_NUMA: every CPU maps to node 0, including during early boot. */
static inline int early_cpu_to_node(int cpu)
{
	return 0;
}
|
|
|
|
|
2009-01-27 03:56:47 +00:00
|
|
|
/* !CONFIG_NUMA stub: there is no node_to_cpumask_map, nothing to set up. */
static inline void setup_node_to_cpumask_map(void) { }
|
|
|
|
|
2008-01-30 12:30:38 +00:00
|
|
|
#endif
|
|
|
|
|
2008-03-31 15:41:55 +00:00
|
|
|
#include <asm-generic/topology.h>
|
|
|
|
|
2008-12-26 11:53:41 +00:00
|
|
|
extern const struct cpumask *cpu_coregroup_mask(int cpu);
|
2008-01-30 12:30:38 +00:00
|
|
|
|
2016-02-22 22:19:15 +00:00
|
|
|
#define topology_logical_package_id(cpu) (cpu_data(cpu).logical_proc_id)
|
2008-01-30 12:30:38 +00:00
|
|
|
#define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
|
2019-05-13 17:58:49 +00:00
|
|
|
#define topology_logical_die_id(cpu) (cpu_data(cpu).logical_die_id)
|
2019-05-13 17:58:48 +00:00
|
|
|
#define topology_die_id(cpu) (cpu_data(cpu).cpu_die_id)
|
2008-01-30 12:30:38 +00:00
|
|
|
#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
|
2014-03-28 20:33:39 +00:00
|
|
|
|
2016-05-04 16:50:59 +00:00
|
|
|
#ifdef CONFIG_SMP
|
2019-05-13 17:58:56 +00:00
|
|
|
#define topology_die_cpumask(cpu) (per_cpu(cpu_die_map, cpu))
|
2009-03-13 04:19:50 +00:00
|
|
|
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
|
2015-05-26 13:11:28 +00:00
|
|
|
#define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
|
2016-02-22 22:19:15 +00:00
|
|
|
|
|
|
|
extern unsigned int __max_logical_packages;
|
|
|
|
#define topology_max_packages() (__max_logical_packages)
|
2016-05-20 00:09:55 +00:00
|
|
|
|
2019-05-13 17:58:46 +00:00
|
|
|
extern unsigned int __max_die_per_package;
|
|
|
|
|
|
|
|
static inline int topology_max_die_per_package(void)
|
|
|
|
{
|
|
|
|
return __max_die_per_package;
|
|
|
|
}
|
|
|
|
|
2016-05-20 00:09:55 +00:00
|
|
|
extern int __max_smt_threads;
|
|
|
|
|
|
|
|
static inline int topology_max_smt_threads(void)
|
|
|
|
{
|
|
|
|
return __max_smt_threads;
|
|
|
|
}
|
|
|
|
|
2016-02-22 22:19:15 +00:00
|
|
|
int topology_update_package_map(unsigned int apicid, unsigned int cpu);
|
2019-05-13 17:58:49 +00:00
|
|
|
int topology_update_die_map(unsigned int dieid, unsigned int cpu);
|
2018-05-29 15:50:22 +00:00
|
|
|
int topology_phys_to_logical_pkg(unsigned int pkg);
|
2019-05-13 17:58:49 +00:00
|
|
|
int topology_phys_to_logical_die(unsigned int die, unsigned int cpu);
|
2018-05-29 15:50:22 +00:00
|
|
|
bool topology_is_primary_thread(unsigned int cpu);
|
2018-06-21 08:37:20 +00:00
|
|
|
bool topology_smt_supported(void);
|
2016-02-22 22:19:15 +00:00
|
|
|
#else
|
|
|
|
#define topology_max_packages() (1)
|
|
|
|
/* !CONFIG_SMP stub: no package map to maintain, always succeeds. */
static inline int
topology_update_package_map(unsigned int apicid, unsigned int cpu) { return 0; }
|
2019-05-13 17:58:49 +00:00
|
|
|
/* !CONFIG_SMP stub: no die map to maintain, always succeeds. */
static inline int
topology_update_die_map(unsigned int dieid, unsigned int cpu) { return 0; }
|
2016-02-22 22:19:15 +00:00
|
|
|
/* !CONFIG_SMP stub: only one package exists, logical id 0. */
static inline int topology_phys_to_logical_pkg(unsigned int pkg) { return 0; }
|
2019-05-13 17:58:46 +00:00
|
|
|
/* !CONFIG_SMP stub: only one die exists, logical id 0. */
static inline int topology_phys_to_logical_die(unsigned int die,
					       unsigned int cpu) { return 0; }
|
|
|
|
/* !CONFIG_SMP stub: a package contains exactly one die. */
static inline int topology_max_die_per_package(void) { return 1; }
|
2016-05-20 00:09:55 +00:00
|
|
|
/* !CONFIG_SMP stub: only one SMT thread exists. */
static inline int topology_max_smt_threads(void) { return 1; }
|
2018-05-29 15:50:22 +00:00
|
|
|
/* !CONFIG_SMP stub: the sole CPU is by definition the primary thread. */
static inline bool topology_is_primary_thread(unsigned int cpu) { return true; }
|
2018-06-21 08:37:20 +00:00
|
|
|
/* !CONFIG_SMP stub: SMT cannot be supported without SMP. */
static inline bool topology_smt_supported(void) { return false; }
|
2008-01-30 12:30:38 +00:00
|
|
|
#endif
|
|
|
|
|
2008-04-29 22:05:29 +00:00
|
|
|
/* No-op on x86: the firmware-reported physical package id needs no fixup. */
static inline void arch_fix_phys_package_id(int num, u32 slot)
{
}
|
|
|
|
|
2008-02-19 11:21:20 +00:00
|
|
|
struct pci_bus;
|
2014-01-24 18:54:36 +00:00
|
|
|
int x86_pci_root_bus_node(int bus);
|
2011-10-28 22:28:14 +00:00
|
|
|
void x86_pci_root_bus_resources(int bus, struct list_head *resources);
|
2008-02-19 11:21:20 +00:00
|
|
|
|
2016-11-22 20:23:54 +00:00
|
|
|
extern bool x86_topology_update;
|
2016-11-22 20:23:55 +00:00
|
|
|
|
2016-11-29 18:43:27 +00:00
|
|
|
#ifdef CONFIG_SCHED_MC_PRIO
|
2016-11-22 20:23:55 +00:00
|
|
|
#include <asm/percpu.h>
|
|
|
|
|
|
|
|
DECLARE_PER_CPU_READ_MOSTLY(int, sched_core_priority);
|
2016-11-22 20:23:56 +00:00
|
|
|
extern unsigned int __read_mostly sysctl_sched_itmt_enabled;
|
2016-11-22 20:23:55 +00:00
|
|
|
|
|
|
|
/* Interface to set priority of a cpu */
|
|
|
|
void sched_set_itmt_core_prio(int prio, int core_cpu);
|
|
|
|
|
|
|
|
/* Interface to notify scheduler that system supports ITMT */
|
2016-11-22 20:23:56 +00:00
|
|
|
int sched_set_itmt_support(void);
|
2016-11-22 20:23:55 +00:00
|
|
|
|
|
|
|
/* Interface to notify scheduler that system revokes ITMT support */
|
|
|
|
void sched_clear_itmt_support(void);
|
|
|
|
|
2016-11-29 18:43:27 +00:00
|
|
|
#else /* CONFIG_SCHED_MC_PRIO */
|
2016-11-22 20:23:55 +00:00
|
|
|
|
2016-11-22 20:23:56 +00:00
|
|
|
#define sysctl_sched_itmt_enabled 0
|
2016-11-22 20:23:55 +00:00
|
|
|
/* !CONFIG_SCHED_MC_PRIO stub: ITMT core priorities are ignored. */
static inline void sched_set_itmt_core_prio(int prio, int core_cpu)
{
}
|
2016-11-22 20:23:56 +00:00
|
|
|
/* !CONFIG_SCHED_MC_PRIO stub: ITMT can never be enabled; report success. */
static inline int sched_set_itmt_support(void)
{
	return 0;
}
|
|
|
|
/* !CONFIG_SCHED_MC_PRIO stub: ITMT support was never set, nothing to revoke. */
static inline void sched_clear_itmt_support(void)
{
}
|
2016-11-29 18:43:27 +00:00
|
|
|
#endif /* CONFIG_SCHED_MC_PRIO */
|
2016-11-22 20:23:55 +00:00
|
|
|
|
2008-10-23 05:26:29 +00:00
|
|
|
#endif /* _ASM_X86_TOPOLOGY_H */
|