#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif

static void early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	/* Unmask CPUID levels if masked: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
				  MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
			c->cpuid_level = cpuid_eax(0);
			get_cpu_cap(c);
		}
	}
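	/*
	 * For reference: msr_clear_bit() returns 1 only when the LIMIT_CPUID
	 * bit was actually set and has just been cleared (0 means it was
	 * already clear, negative means the MSR access failed), so the
	 * cpuid_level and capability re-read above only happens when the
	 * unmasking really changed the visible CPUID state.
	 */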

	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) {
		unsigned lower_word;

		wrmsr(MSR_IA32_UCODE_REV, 0, 0);
		/* Required by the SDM */
		sync_core();
		rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
	}
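	/*
	 * The block above follows the SDM recipe for reading the microcode
	 * revision: zero IA32_BIOS_SIGN_ID (MSR_IA32_UCODE_REV), execute a
	 * serializing instruction (sync_core() runs CPUID), then read the
	 * MSR back - the revision is reported in the high half (EDX), which
	 * is what lands in c->microcode.
	 */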

	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page. This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
	    c->microcode < 0x20e) {
		printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3 &&
	    (c->x86_mask == 0x3 || c->x86_mask == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is CPUID 8000_0007 edx. Bit 8 set means the TSC runs
	 * at a constant rate with P/T states and does not stop in deep
	 * C-states.
	 *
	 * It is also reliable across cores and sockets (but not across
	 * cabinets - we turn it off in that case explicitly).
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
		if (!check_tsc_unstable())
			set_sched_clock_stable();
	}

	/* Penwell and Cloverview have a TSC that does not stop across S3 */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case 0x27:	/* Penwell */
		case 0x35:	/* Cloverview */
			set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
			break;
		default:
			break;
		}
	}

	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs.
	 * " Page with PAT set to WC while associated MTRR is UC
	 * may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);

#ifdef CONFIG_KMEMCHECK
	/*
	 * P4s have a "fast strings" feature which causes single-
	 * stepping REP instructions to only generate a #DB on
	 * cache-line boundaries.
	 *
	 * Ingo Molnar reported a Pentium D (model 6) and a Xeon
	 * (model 2) with the same problem.
	 */
	if (c->x86 == 15)
		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
				  MSR_IA32_MISC_ENABLE_FAST_STRING_BIT) > 0)
			pr_info("kmemcheck: Disabling fast string operations\n");
#endif

	/*
	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
	 * clear the fast string and enhanced fast string CPU capabilities.
	 */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
			printk(KERN_INFO "Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}
}

#ifdef CONFIG_X86_32
/*
 * Early probe support logic for ppro memory erratum #50
 *
 * This is called before we do cpu ident work
 */
int ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask < 8) {
		printk(KERN_INFO "Pentium Pro with Erratum #50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}

static void intel_smp_check(struct cpuinfo_x86 *c)
{
	/* Only check secondary CPUs, i.e. calls from identify_secondary_cpu() */
	if (!c->cpu_index)
		return;

	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
			     "with B stepping processors.\n");
	}
}

static int forcepae;
static int __init forcepae_setup(char *__unused)
{
	forcepae = 1;
	return 1;
}
__setup("forcepae", forcepae_setup);
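
/*
 * With __setup() registered, booting with "forcepae" on the kernel
 * command line runs forcepae_setup() during early parameter parsing:
 *
 *	vmlinuz ... forcepae
 *
 * so the flag is already set by the time intel_workarounds() below
 * consults it.
 */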

static void intel_workarounds(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All current models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system. Announce that the fault handler will be checking for it.
	 */
	clear_cpu_bug(c, X86_BUG_F00F);
	if (!paravirt_enabled() && c->x86 == 5) {
		static int f00f_workaround_enabled;

		set_cpu_bug(c, X86_BUG_F00F);
		if (!f00f_workaround_enabled) {
			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);
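	/*
	 * The packed value is (family << 8 | model << 4 | stepping): 0x633
	 * decodes to family 6, model 3, stepping 3. E.g. a family 6,
	 * model 1, stepping 9 Pentium Pro packs to 0x619 < 0x633 and loses
	 * the bogus SEP bit.
	 */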

	/*
	 * PAE CPUID issue: many Pentium M report no PAE but may have a
	 * functionally usable PAE implementation.
	 * Forcefully enable PAE if kernel parameter "forcepae" is present.
	 */
	if (forcepae) {
		printk(KERN_WARNING "PAE forced!\n");
		set_cpu_cap(c, X86_FEATURE_PAE);
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
	}

	/*
	 * P4 Xeon erratum 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
			pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
	if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
		set_cpu_cap(c, X86_FEATURE_11AP);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif
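	/*
	 * A mask of 7 means the usercopy fast path uses rep movsl only when
	 * source and destination satisfy (addr & 7) == 0; roughly, misaligned
	 * buffers take the slower fallback in the 32-bit usercopy code
	 * instead.
	 */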

	intel_smp_check(c);
}

#else
static void intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();

	/*
	 * Don't do the funky fallback heuristics the AMD version employs
	 * for now.
	 */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE || !node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}

/*
 * find out the number of processor cores on the die
 */
static int intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	if (c->cpuid_level < 4)
		return 1;

	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
	if (eax & 0x1f)
		return (eax >> 26) + 1;
	else
		return 1;
}
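
/*
 * CPUID.(EAX=4, ECX=0):EAX[4:0] is the cache type (0 means leaf 4 is not
 * populated) and EAX[31:26] is the maximum number of addressable core IDs
 * minus one. Worked example: eax = 0x1c004143 gives
 * (0x1c004143 >> 26) + 1 = 7 + 1 = 8 cores.
 */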

static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
	/* Intel VMX MSR indicated features */
#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW	0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI		0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS	0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC	0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT		0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID		0x00000020

	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;

	clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	clear_cpu_cap(c, X86_FEATURE_VNMI);
	clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
	clear_cpu_cap(c, X86_FEATURE_EPT);
	clear_cpu_cap(c, X86_FEATURE_VPID);

	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
	msr_ctl = vmx_msr_high | vmx_msr_low;
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
		set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
		set_cpu_cap(c, X86_FEATURE_VNMI);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
		      vmx_msr_low, vmx_msr_high);
		msr_ctl2 = vmx_msr_high | vmx_msr_low;
		if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
		    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
			set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
			set_cpu_cap(c, X86_FEATURE_EPT);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
			set_cpu_cap(c, X86_FEATURE_VPID);
	}
}
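
/*
 * The VMX capability MSRs read above report "allowed 0-settings" in the
 * low 32 bits and "allowed 1-settings" in the high 32 bits. A control is
 * usable iff its bit is set in the high word; OR-ing in the low word is
 * harmless, because a bit set there marks a must-be-1 control, which is
 * necessarily also allowed to be 1.
 */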

static void init_intel(struct cpuinfo_x86 *c)
{
	unsigned int l2 = 0;

	early_init_intel(c);

	intel_workarounds(c);

	/*
	 * Detect the extended topology information if available. This
	 * will reinitialise the initial_apicid which will be used
	 * in init_intel_cacheinfo()
	 */
	detect_extended_topology(c);

	l2 = init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);

		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}
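	/*
	 * In CPUID leaf 0xa, EAX[7:0] is the architectural perfmon version
	 * and EAX[15:8] the number of general-purpose counters per logical
	 * CPU, so the test above demands a nonzero version and at least two
	 * counters.
	 */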

	if (cpu_has_xmm2)
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	if (cpu_has_ds) {
		unsigned int l1;

		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}
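	/*
	 * IA32_MISC_ENABLE bit 11 is "BTS unavailable" and bit 12 is "PEBS
	 * unavailable", hence the inverted tests above: a clear bit means
	 * the debug store facility is usable.
	 */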

	if (c->x86 == 6 && cpu_has_clflush &&
	    (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
		set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);
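	/*
	 * Model 29 is the Core2-based Xeon 7400 (Dunnington); 46 and 47 are
	 * the NHM-EX and WSM-EX Xeons. On these parts MWAIT can return
	 * immediately unless the monitored line is flushed first, so the
	 * idle paths use X86_FEATURE_CLFLUSH_MONITOR to issue clflush
	 * before monitor/mwait.
	 */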

#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (l2 == 0)
				p = "Celeron (Covington)";
			else if (l2 == 256)
				p = "Mobile Pentium II (Dixon)";
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_mask == 0 || c->x86_mask == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}

	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_P4);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_P3);
#endif

	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
		/*
		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
		 * detection.
		 */
		c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
		detect_ht(c);
#endif
	}

	/* Work around errata */
	srat_detect_node(c);

	if (cpu_has(c, X86_FEATURE_VMX))
		detect_vmx_virtcap(c);

	/*
	 * Initialize MSR_IA32_ENERGY_PERF_BIAS if BIOS did not.
	 * x86_energy_perf_policy(8) is available to change it at run-time
	 */
	if (cpu_has(c, X86_FEATURE_EPB)) {
		u64 epb;

		rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
		if ((epb & 0xF) == ENERGY_PERF_BIAS_PERFORMANCE) {
			printk_once(KERN_WARNING "ENERGY_PERF_BIAS:"
				" Set to 'normal', was 'performance'\n"
				"ENERGY_PERF_BIAS: View and update with"
				" x86_energy_perf_policy(8)\n");
			epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
			wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
		}
	}
}
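
/*
 * The EPB hint written by init_intel() above occupies the low four bits
 * of MSR_IA32_ENERGY_PERF_BIAS: 0 requests maximum performance, 15
 * maximum energy saving, and ENERGY_PERF_BIAS_NORMAL (6) is the middle
 * ground substituted for the power-on default of 0.
 */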

#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;
	return size;
}
#endif

#define TLB_INST_4K	0x01
#define TLB_INST_4M	0x02
#define TLB_INST_2M_4M	0x03

#define TLB_INST_ALL	0x05
#define TLB_INST_1G	0x06

#define TLB_DATA_4K	0x11
#define TLB_DATA_4M	0x12
#define TLB_DATA_2M_4M	0x13
#define TLB_DATA_4K_4M	0x14

#define TLB_DATA_1G	0x16

#define TLB_DATA0_4K	0x21
#define TLB_DATA0_4M	0x22
#define TLB_DATA0_2M_4M	0x23

#define STLB_4K		0x41
#define STLB_4K_2M	0x42

static const struct _tlb_table intel_tlb_table[] = {
	{ 0x01, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0x02, TLB_INST_4M,		2,	" TLB_INST 4 MByte pages, fully associative" },
	{ 0x03, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0x04, TLB_DATA_4M,		8,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x05, TLB_DATA_4M,		32,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x0b, TLB_INST_4M,		4,	" TLB_INST 4 MByte pages, 4-way set associative" },
	{ 0x4f, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages" },
	{ 0x50, TLB_INST_ALL,		64,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x51, TLB_INST_ALL,		128,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x52, TLB_INST_ALL,		256,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x55, TLB_INST_2M_4M,		7,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0x56, TLB_DATA0_4M,		16,	" TLB_DATA0 4 MByte pages, 4-way set associative" },
	{ 0x57, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, 4-way associative" },
	{ 0x59, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, fully associative" },
	{ 0x5a, TLB_DATA0_2M_4M,	32,	" TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
	{ 0x5b, TLB_DATA_4K_4M,		64,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5c, TLB_DATA_4K_4M,		128,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5d, TLB_DATA_4K_4M,		256,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x61, TLB_INST_4K,		48,	" TLB_INST 4 KByte pages, fully associative" },
	{ 0x63, TLB_DATA_1G,		4,	" TLB_DATA 1 GByte pages, 4-way set associative" },
	{ 0x76, TLB_INST_2M_4M,		8,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0xb0, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0xb1, TLB_INST_2M_4M,		4,	" TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
	{ 0xb2, TLB_INST_4K,		64,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0xb3, TLB_DATA_4K,		128,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0xb4, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xb5, TLB_INST_4K,		64,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xb6, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xba, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xc0, TLB_DATA_4K_4M,		8,	" TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
	{ 0xc1, STLB_4K_2M,		1024,	" STLB 4 KByte and 2 MByte pages, 8-way associative" },
	{ 0xc2, TLB_DATA_2M_4M,		16,	" DTLB 2 MByte/4 MByte pages, 4-way associative" },
	{ 0xca, STLB_4K,		512,	" STLB 4 KByte pages, 4-way associative" },
	{ 0x00, 0, 0 }
};
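
/*
 * These descriptors come from CPUID leaf 2: each byte of EAX/EBX/ECX/EDX
 * (except the low byte of EAX, which is an iteration count) names one
 * cache or TLB. For example, a byte of 0x63 reports a 4-entry, 4-way set
 * associative data TLB for 1 GByte pages, per the table above.
 */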

static void intel_tlb_lookup(const unsigned char desc)
{
|
|
|
|
unsigned char k;
|
|
|
|
if (desc == 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* look up this descriptor in the table */
|
|
|
|
for (k = 0; intel_tlb_table[k].descriptor != desc && \
|
|
|
|
intel_tlb_table[k].descriptor != 0; k++)
|
|
|
|
;
|
|
|
|
|
|
|
|
if (intel_tlb_table[k].tlb_type == 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
switch (intel_tlb_table[k].tlb_type) {
|
|
|
|
case STLB_4K:
|
|
|
|
if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
|
|
|
|
tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
|
|
|
|
if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
|
|
|
|
tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
|
|
|
|
break;
|
x86, cpu: Detect more TLB configuration
The Intel Software Developer’s Manual covers a few more TLB
configurations exposed as CPUID 2 descriptors:
61H Instruction TLB: 4 KByte pages, fully associative, 48 entries
63H Data TLB: 1 GByte pages, 4-way set associative, 4 entries
76H Instruction TLB: 2M/4M pages, fully associative, 8 entries
B5H Instruction TLB: 4 KByte pages, 8-way set associative, 64 entries
B6H Instruction TLB: 4 KByte pages, 8-way set associative, 128 entries
C1H Shared 2nd-Level TLB: 4 KByte/2 MByte pages, 8-way associative, 1024 entries
C2H Data TLB: 2 MByte/4 MByte pages, 4-way associative, 16 entries
Let's detect them as well.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Link: http://lkml.kernel.org/r/1387801018-14499-1-git-send-email-kirill.shutemov@linux.intel.com
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
2013-12-23 12:16:58 +00:00
|
|
|
case STLB_4K_2M:
|
|
|
|
if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
|
|
|
|
tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
|
|
|
|
if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
|
|
|
|
tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
|
|
|
|
if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
|
|
|
|
tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
|
|
|
|
if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
|
|
|
|
tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
|
|
|
|
if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
|
|
|
|
tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
|
|
|
|
if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
|
|
|
|
tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
|
|
|
|
break;
|
x86/tlb_info: get last level TLB entry number of CPU
For 4KB pages, an x86 CPU has one or two levels of TLB: the first level
consists of separate data and instruction TLBs, and the second level is
a TLB shared by both data and instructions.
For huge pages there is usually just one level, split across the
2MB/4MB and 1GB page sizes.
Although the TLB size of each level is important for performance
tuning, the last-level TLB entry count is suitable for general, coarse
optimization. In fact, the last-level TLB always has the largest entry
count.
This patch records the largest TLB entry count and will use it in
future TLB optimizations.
According to Borislav's suggestion, everything except the tlb_ll[i/d]_*
arrays, i.e. the other functions and data, will be released after
system boot-up.
To be friendly to all x86 vendors, vendor-specific code was moved into
vendor-specific files.
Signed-off-by: Alex Shi <alex.shi@intel.com>
Link: http://lkml.kernel.org/r/1340845344-27557-2-git-send-email-alex.shi@intel.com
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
2012-06-28 01:02:16 +00:00
|
|
|
case TLB_INST_ALL:
|
|
|
|
if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
|
|
|
|
tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
|
|
|
|
if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
|
|
|
|
tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
|
|
|
|
if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
|
|
|
|
tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
|
|
|
|
break;
|
|
|
|
case TLB_INST_4K:
|
|
|
|
if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
|
|
|
|
tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
|
|
|
|
break;
|
|
|
|
case TLB_INST_4M:
|
|
|
|
if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
|
|
|
|
tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
|
|
|
|
break;
|
|
|
|
case TLB_INST_2M_4M:
|
|
|
|
if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
|
|
|
|
tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
|
|
|
|
if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
|
|
|
|
tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
|
|
|
|
break;
|
|
|
|
case TLB_DATA_4K:
|
|
|
|
case TLB_DATA0_4K:
|
|
|
|
if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
|
|
|
|
tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
|
|
|
|
break;
|
|
|
|
case TLB_DATA_4M:
|
|
|
|
case TLB_DATA0_4M:
|
|
|
|
if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
|
|
|
|
tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
|
|
|
|
break;
|
|
|
|
case TLB_DATA_2M_4M:
|
|
|
|
case TLB_DATA0_2M_4M:
|
|
|
|
if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
|
|
|
|
tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
|
|
|
|
if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
|
|
|
|
tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
|
|
|
|
break;
|
|
|
|
case TLB_DATA_4K_4M:
|
|
|
|
if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
|
|
|
|
tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
|
|
|
|
if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
|
|
|
|
tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
|
|
|
|
break;
|
x86, cpu: Detect more TLB configuration
The Intel Software Developer’s Manual covers a few more TLB
configurations exposed as CPUID 2 descriptors:
61H Instruction TLB: 4 KByte pages, fully associative, 48 entries
63H Data TLB: 1 GByte pages, 4-way set associative, 4 entries
76H Instruction TLB: 2M/4M pages, fully associative, 8 entries
B5H Instruction TLB: 4 KByte pages, 8-way set associative, 64 entries
B6H Instruction TLB: 4 KByte pages, 8-way set associative, 128 entries
C1H Shared 2nd-Level TLB: 4 KByte/2 MByte pages, 8-way associative, 1024 entries
C2H Data TLB: 2 MByte/4 MByte pages, 4-way associative, 16 entries
Let's detect them as well.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Link: http://lkml.kernel.org/r/1387801018-14499-1-git-send-email-kirill.shutemov@linux.intel.com
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
2013-12-23 12:16:58 +00:00
|
|
|
case TLB_DATA_1G:
|
|
|
|
if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
|
|
|
|
tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
|
x86/tlb_info: get last level TLB entry number of CPU
For 4KB pages, an x86 CPU has one or two levels of TLB: the first level
consists of separate data and instruction TLBs, and the second level is
a TLB shared by both data and instructions.
For huge pages there is usually just one level, split across the
2MB/4MB and 1GB page sizes.
Although the TLB size of each level is important for performance
tuning, the last-level TLB entry count is suitable for general, coarse
optimization. In fact, the last-level TLB always has the largest entry
count.
This patch records the largest TLB entry count and will use it in
future TLB optimizations.
According to Borislav's suggestion, everything except the tlb_ll[i/d]_*
arrays, i.e. the other functions and data, will be released after
system boot-up.
To be friendly to all x86 vendors, vendor-specific code was moved into
vendor-specific files.
Signed-off-by: Alex Shi <alex.shi@intel.com>
Link: http://lkml.kernel.org/r/1340845344-27557-2-git-send-email-alex.shi@intel.com
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
2012-06-28 01:02:16 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
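Every case in the switch above performs the same max-merge step. A minimal sketch of that idiom in isolation (the helper name and the unsigned int type are illustrative assumptions, not kernel API):

/* Raise a last-level TLB entry count; never lower it. */
static inline void tlb_update_max(unsigned int *slot, unsigned int entries)
{
	if (*slot < entries)
		*slot = entries;
}

With such a helper, the STLB_4K case would reduce to two calls, one for tlb_lli_4k[ENTRIES] and one for tlb_lld_4k[ENTRIES].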
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. The fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
and are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-18 22:23:59 +00:00
|
|
|
static void intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
|
2012-06-28 01:02:19 +00:00
|
|
|
{
|
|
|
|
switch ((c->x86 << 8) + c->x86_model) {
|
|
|
|
case 0x60f: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
|
|
|
|
case 0x616: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
|
|
|
|
case 0x617: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
|
|
|
|
case 0x61d: /* six-core 45 nm xeon "Dunnington" */
|
|
|
|
tlb_flushall_shift = -1;
|
|
|
|
break;
|
2014-01-21 22:33:22 +00:00
|
|
|
case 0x63a: /* Ivybridge */
|
|
|
|
tlb_flushall_shift = 2;
|
|
|
|
break;
|
2012-06-28 01:02:19 +00:00
|
|
|
case 0x61a: /* 45 nm nehalem, "Bloomfield" */
|
|
|
|
case 0x61e: /* 45 nm nehalem, "Lynnfield" */
|
|
|
|
case 0x625: /* 32 nm nehalem, "Clarkdale" */
|
|
|
|
case 0x62c: /* 32 nm nehalem, "Gulftown" */
|
|
|
|
case 0x62e: /* 45 nm nehalem-ex, "Beckton" */
|
|
|
|
case 0x62f: /* 32 nm Xeon E7 */
|
|
|
|
case 0x62a: /* SandyBridge */
|
|
|
|
case 0x62d: /* SandyBridge, "Romley-EP" */
|
|
|
|
default:
|
|
|
|
tlb_flushall_shift = 6;
|
|
|
|
}
|
|
|
|
}
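The switch above keys on a packed (family << 8) + model signature. A minimal sketch of the packing, with a worked example (the function name is an illustrative assumption):

/* Pack family/model the way the switch above expects. */
static unsigned int intel_fam_model_sig(unsigned int family, unsigned int model)
{
	return (family << 8) + model;	/* (6 << 8) + 0x3a == 0x63a, IvyBridge */
}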
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. The fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
and are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-18 22:23:59 +00:00
|
|
|
static void intel_detect_tlb(struct cpuinfo_x86 *c)
|
x86/tlb_info: get last level TLB entry number of CPU
For 4KB pages, an x86 CPU has one or two levels of TLB: the first level
consists of separate data and instruction TLBs, and the second level is
a TLB shared by both data and instructions.
For huge pages there is usually just one level, split across the
2MB/4MB and 1GB page sizes.
Although the TLB size of each level is important for performance
tuning, the last-level TLB entry count is suitable for general, coarse
optimization. In fact, the last-level TLB always has the largest entry
count.
This patch records the largest TLB entry count and will use it in
future TLB optimizations.
According to Borislav's suggestion, everything except the tlb_ll[i/d]_*
arrays, i.e. the other functions and data, will be released after
system boot-up.
To be friendly to all x86 vendors, vendor-specific code was moved into
vendor-specific files.
Signed-off-by: Alex Shi <alex.shi@intel.com>
Link: http://lkml.kernel.org/r/1340845344-27557-2-git-send-email-alex.shi@intel.com
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
2012-06-28 01:02:16 +00:00
|
|
|
{
|
|
|
|
int i, j, n;
|
|
|
|
unsigned int regs[4];
|
|
|
|
unsigned char *desc = (unsigned char *)regs;
|
2012-08-06 17:00:37 +00:00
|
|
|
|
|
|
|
if (c->cpuid_level < 2)
|
|
|
|
return;
|
|
|
|
|
x86/tlb_info: get last level TLB entry number of CPU
For 4KB pages, an x86 CPU has one or two levels of TLB: the first level
consists of separate data and instruction TLBs, and the second level is
a TLB shared by both data and instructions.
For huge pages there is usually just one level, split across the
2MB/4MB and 1GB page sizes.
Although the TLB size of each level is important for performance
tuning, the last-level TLB entry count is suitable for general, coarse
optimization. In fact, the last-level TLB always has the largest entry
count.
This patch records the largest TLB entry count and will use it in
future TLB optimizations.
According to Borislav's suggestion, everything except the tlb_ll[i/d]_*
arrays, i.e. the other functions and data, will be released after
system boot-up.
To be friendly to all x86 vendors, vendor-specific code was moved into
vendor-specific files.
Signed-off-by: Alex Shi <alex.shi@intel.com>
Link: http://lkml.kernel.org/r/1340845344-27557-2-git-send-email-alex.shi@intel.com
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
2012-06-28 01:02:16 +00:00
|
|
|
/* Number of times to iterate */
|
|
|
|
n = cpuid_eax(2) & 0xFF;
|
|
|
|
|
|
|
|
for (i = 0 ; i < n ; i++) {
|
|
|
|
cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
|
|
|
|
|
|
|
|
/* If bit 31 is set, this is an unknown format */
|
|
|
|
for (j = 0 ; j < 3 ; j++)
|
|
|
|
if (regs[j] & (1 << 31))
|
|
|
|
regs[j] = 0;
|
|
|
|
|
|
|
|
/* Byte 0 is level count, not a descriptor */
|
|
|
|
for (j = 1 ; j < 16 ; j++)
|
|
|
|
intel_tlb_lookup(desc[j]);
|
|
|
|
}
|
2012-06-28 01:02:19 +00:00
|
|
|
intel_tlb_flushall_shift_set(c);
|
x86/tlb_info: get last level TLB entry number of CPU
For 4KB pages, an x86 CPU has one or two levels of TLB: the first level
consists of separate data and instruction TLBs, and the second level is
a TLB shared by both data and instructions.
For huge pages there is usually just one level, split across the
2MB/4MB and 1GB page sizes.
Although the TLB size of each level is important for performance
tuning, the last-level TLB entry count is suitable for general, coarse
optimization. In fact, the last-level TLB always has the largest entry
count.
This patch records the largest TLB entry count and will use it in
future TLB optimizations.
According to Borislav's suggestion, everything except the tlb_ll[i/d]_*
arrays, i.e. the other functions and data, will be released after
system boot-up.
To be friendly to all x86 vendors, vendor-specific code was moved into
vendor-specific files.
Signed-off-by: Alex Shi <alex.shi@intel.com>
Link: http://lkml.kernel.org/r/1340845344-27557-2-git-send-email-alex.shi@intel.com
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
2012-06-28 01:02:16 +00:00
|
|
|
}
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. The fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
and are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-18 22:23:59 +00:00
|
|
|
static const struct cpu_dev intel_cpu_dev = {
|
2005-04-16 22:20:36 +00:00
|
|
|
.c_vendor = "Intel",
|
2008-02-22 22:09:42 +00:00
|
|
|
.c_ident = { "GenuineIntel" },
|
2008-09-09 23:40:35 +00:00
|
|
|
#ifdef CONFIG_X86_32
|
2013-10-21 08:35:20 +00:00
|
|
|
.legacy_models = {
|
|
|
|
{ .family = 4, .model_names =
|
2008-02-22 22:09:42 +00:00
|
|
|
{
|
|
|
|
[0] = "486 DX-25/33",
|
|
|
|
[1] = "486 DX-50",
|
|
|
|
[2] = "486 SX",
|
|
|
|
[3] = "486 DX/2",
|
|
|
|
[4] = "486 SL",
|
|
|
|
[5] = "486 SX/2",
|
|
|
|
[7] = "486 DX/2-WB",
|
|
|
|
[8] = "486 DX/4",
|
2005-04-16 22:20:36 +00:00
|
|
|
[9] = "486 DX/4-WB"
|
|
|
|
}
|
|
|
|
},
|
2013-10-21 08:35:20 +00:00
|
|
|
{ .family = 5, .model_names =
|
2008-02-22 22:09:42 +00:00
|
|
|
{
|
|
|
|
[0] = "Pentium 60/66 A-step",
|
|
|
|
[1] = "Pentium 60/66",
|
2005-04-16 22:20:36 +00:00
|
|
|
[2] = "Pentium 75 - 200",
|
2008-02-22 22:09:42 +00:00
|
|
|
[3] = "OverDrive PODP5V83",
|
2005-04-16 22:20:36 +00:00
|
|
|
[4] = "Pentium MMX",
|
2008-02-22 22:09:42 +00:00
|
|
|
[7] = "Mobile Pentium 75 - 200",
|
2005-04-16 22:20:36 +00:00
|
|
|
[8] = "Mobile Pentium MMX"
|
|
|
|
}
|
|
|
|
},
|
2013-10-21 08:35:20 +00:00
|
|
|
{ .family = 6, .model_names =
|
2008-02-22 22:09:42 +00:00
|
|
|
{
|
2005-04-16 22:20:36 +00:00
|
|
|
[0] = "Pentium Pro A-step",
|
2008-02-22 22:09:42 +00:00
|
|
|
[1] = "Pentium Pro",
|
|
|
|
[3] = "Pentium II (Klamath)",
|
|
|
|
[4] = "Pentium II (Deschutes)",
|
|
|
|
[5] = "Pentium II (Deschutes)",
|
2005-04-16 22:20:36 +00:00
|
|
|
[6] = "Mobile Pentium II",
|
2008-02-22 22:09:42 +00:00
|
|
|
[7] = "Pentium III (Katmai)",
|
|
|
|
[8] = "Pentium III (Coppermine)",
|
2005-04-16 22:20:36 +00:00
|
|
|
[10] = "Pentium III (Cascades)",
|
|
|
|
[11] = "Pentium III (Tualatin)",
|
|
|
|
}
|
|
|
|
},
|
2013-10-21 08:35:20 +00:00
|
|
|
{ .family = 15, .model_names =
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
[0] = "Pentium 4 (Unknown)",
|
|
|
|
[1] = "Pentium 4 (Willamette)",
|
|
|
|
[2] = "Pentium 4 (Northwood)",
|
|
|
|
[4] = "Pentium 4 (Foster)",
|
|
|
|
[5] = "Pentium 4 (Foster)",
|
|
|
|
}
|
|
|
|
},
|
|
|
|
},
|
2013-10-21 08:35:20 +00:00
|
|
|
.legacy_cache_size = intel_size_cache,
|
2008-09-09 23:40:35 +00:00
|
|
|
#endif
|
x86/tlb_info: get last level TLB entry number of CPU
For 4KB pages, an x86 CPU has one or two levels of TLB: the first level
consists of separate data and instruction TLBs, and the second level is
a TLB shared by both data and instructions.
For huge pages there is usually just one level, split across the
2MB/4MB and 1GB page sizes.
Although the TLB size of each level is important for performance
tuning, the last-level TLB entry count is suitable for general, coarse
optimization. In fact, the last-level TLB always has the largest entry
count.
This patch records the largest TLB entry count and will use it in
future TLB optimizations.
According to Borislav's suggestion, everything except the tlb_ll[i/d]_*
arrays, i.e. the other functions and data, will be released after
system boot-up.
To be friendly to all x86 vendors, vendor-specific code was moved into
vendor-specific files.
Signed-off-by: Alex Shi <alex.shi@intel.com>
Link: http://lkml.kernel.org/r/1340845344-27557-2-git-send-email-alex.shi@intel.com
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
2012-06-28 01:02:16 +00:00
|
|
|
.c_detect_tlb = intel_detect_tlb,
|
x86: use ELF section to list CPU vendor specific code
Replace the hardcoded list of initialization functions for each CPU
vendor by a list in an ELF section, which is read at initialization in
arch/x86/kernel/cpu/cpu.c to fill the cpu_devs[] array. The ELF
section, named .x86cpuvendor.init, is reclaimed after boot, and
contains entries of type "struct cpu_vendor_dev" which associates a
vendor number with a pointer to a "struct cpu_dev" structure.
This first modification makes it possible to remove all the
VENDOR_init_cpu() functions.
This patch also removes the hardcoded calls to early_init_amd() and
early_init_intel(). Instead, we add a "c_early_init" member to the
cpu_dev structure, which is then called if not NULL by the generic CPU
initialization code. Unfortunately, in early_cpu_detect(), this_cpu is
not yet set, so we have to use the cpu_devs[] array directly.
This patch is part of the Linux Tiny project, and is needed for a
further patch that will allow disabling compilation of unused CPU
support code.
Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2008-02-15 11:00:23 +00:00
|
|
|
.c_early_init = early_init_intel,
|
2005-04-16 22:20:36 +00:00
|
|
|
.c_init = init_intel,
|
2008-09-04 19:09:45 +00:00
|
|
|
.c_x86_vendor = X86_VENDOR_INTEL,
|
2005-04-16 22:20:36 +00:00
|
|
|
};
|
|
|
|
|
2008-09-04 19:09:45 +00:00
|
|
|
cpu_dev_register(intel_cpu_dev);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
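The "x86: use ELF section to list CPU vendor specific code" changelog above explains the mechanism behind cpu_dev_register(). Here is a simplified, self-contained model of that section-based registration pattern, assuming GCC section attributes (all demo_* names are illustrative, not the kernel's):

/* Each vendor file drops a pointer to its descriptor into a dedicated
 * linker section; generic boot code later walks that section to fill
 * a cpu_devs[]-style array.
 */
struct demo_cpu_dev {
	const char *c_vendor;
	void (*c_early_init)(void);
};

#define demo_cpu_dev_register(dev)					\
	static const struct demo_cpu_dev *const __demo_##dev		\
	__attribute__((used, section(".demo_cpudev.init"))) = &(dev)

A vendor file would then end with demo_cpu_dev_register(some_cpu_dev); much as this file ends with cpu_dev_register(intel_cpu_dev);.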