mirror of
https://github.com/torvalds/linux.git
synced 2024-12-18 00:53:40 +00:00
c29a7baf09
NUMA emulation (aka fake NUMA) distributes the available memory to nodes without using real topology information about the physical memory of the machine. Splitting the system memory into nodes replicates the memory management structures for each node. Particularly each node has its own "mm locks" and its own "kswapd" task. For large systems, under certain conditions, this results in improved system performance and/or latency based on reduced pressure on the mm locks and the kswapd tasks. NUMA emulation distributes CPUs to nodes while respecting the original machine topology information. This is done by trying to avoid separating CPUs which reside on the same book or even on the same MC. Because the current Linux scheduler code requires a stable cpu to node mapping, cores are pinned to nodes when the first CPU thread is set online. This patch is based on the initial implementation from Philipp Hachtmann. Signed-off-by: Michael Holzheu <holzheu@linux.vnet.ibm.com> Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
25 lines
718 B
C
25 lines
718 B
C
/*
 * NUMA support for s390
 *
 * Define declarations used for communication between NUMA mode
 * implementations and NUMA core functionality.
 *
 * Copyright IBM Corp. 2015
 */
#ifndef __S390_NUMA_MODE_H
#define __S390_NUMA_MODE_H

/*
 * Operations vector a NUMA mode implementation provides to the NUMA
 * core code. Each callback may be left NULL by a mode that does not
 * need it; the core is expected to check before calling.
 */
struct numa_mode {
	char *name;				/* Name of mode */
	void (*setup)(void);			/* Initialize mode */
	void (*update_cpu_topology)(void);	/* Called by topology code */
	int (*__pfn_to_nid)(unsigned long pfn);	/* PFN to node ID */
	unsigned long (*align)(void);		/* Minimum node alignment */
	int (*distance)(int a, int b);		/* Distance between two nodes */
};

/* Available mode implementations, defined in their respective .c files */
extern const struct numa_mode numa_mode_plain;
extern const struct numa_mode numa_mode_emu;

#endif /* __S390_NUMA_MODE_H */
|