forked from Minki/linux
87a1c441e1
While trying to make hpet_enable() use ioremap instead of fixmap, we got: ioremap: invalid physical address fed00000 ------------[ cut here ]------------ WARNING: at arch/x86/mm/ioremap.c:161 __ioremap_caller+0x8c/0x2f3() Modules linked in: Pid: 0, comm: swapper Not tainted 2.6.26-rc9-tip-01873-ga9827e7-dirty #358 Call Trace: [<ffffffff8026615e>] warn_on_slowpath+0x6c/0xa7 [<ffffffff802e2313>] ? __slab_alloc+0x20a/0x3fb [<ffffffff802d85c5>] ? mpol_new+0x88/0x17d [<ffffffff8022a4f4>] ? mcount_call+0x5/0x31 [<ffffffff8022a4f4>] ? mcount_call+0x5/0x31 [<ffffffff8024b0d2>] __ioremap_caller+0x8c/0x2f3 [<ffffffff80e86dbd>] ? hpet_enable+0x39/0x241 [<ffffffff8022a4f4>] ? mcount_call+0x5/0x31 [<ffffffff8024b466>] ioremap_nocache+0x2a/0x40 [<ffffffff80e86dbd>] hpet_enable+0x39/0x241 [<ffffffff80e7a1f6>] hpet_time_init+0x21/0x4e [<ffffffff80e730e9>] start_kernel+0x302/0x395 [<ffffffff80e722aa>] x86_64_start_reservations+0xb9/0xd4 [<ffffffff80e722fe>] ? x86_64_init_pda+0x39/0x4f [<ffffffff80e72400>] x86_64_start_kernel+0xec/0x107 ---[ end trace a7919e7f17c0a725 ]--- It seems that on AMD systems this value is set too late. Move the setting earlier, into early_identify_cpu(), and remove the equivalent code from the Intel and Centaur paths. Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
96 lines
2.1 KiB
C
96 lines
2.1 KiB
C
#include <linux/init.h>
|
|
#include <linux/smp.h>
|
|
#include <asm/processor.h>
|
|
#include <asm/ptrace.h>
|
|
#include <asm/topology.h>
|
|
#include <asm/numa_64.h>
|
|
|
|
#include "cpu.h"
|
|
|
|
static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
	unsigned int family = c->x86;
	unsigned int model  = c->x86_model;

	/*
	 * TSC runs at a constant rate on Netburst (family 0xf) from
	 * model 3 onwards and on family 6 from model 0x0e (Core) onwards.
	 */
	if ((family == 0xf && model >= 0x03) ||
	    (family == 0x6 && model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	/* All 64-bit Intel CPUs support SYSENTER from compat (32-bit) mode. */
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
}
|
|
|
|
/*
|
|
* find out the number of processor cores on the die
|
|
*/
|
|
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
|
|
{
|
|
unsigned int eax, t;
|
|
|
|
if (c->cpuid_level < 4)
|
|
return 1;
|
|
|
|
cpuid_count(4, 0, &eax, &t, &t, &t);
|
|
|
|
if (eax & 0x1f)
|
|
return ((eax >> 26) + 1);
|
|
else
|
|
return 1;
|
|
}
|
|
|
|
static void __cpuinit srat_detect_node(void)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int apicid = hard_smp_processor_id();
	unsigned node = apicid_to_node[apicid];

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	if (node == NUMA_NO_NODE || !node_online(node))
		node = first_node(node_online_map);

	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}
|
|
|
|
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	init_intel_cacheinfo(c);

	/*
	 * Architectural perfmon (CPUID leaf 0xa): require a non-zero
	 * version ID and more than one general-purpose counter.
	 */
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);

		if ((eax & 0xff) && (((eax >> 8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	/*
	 * Debug Store: MISC_ENABLE bits 11/12 *disable* BTS/PEBS,
	 * so the feature is usable only when the bit is clear.
	 */
	if (cpu_has_ds) {
		unsigned int lo, hi;

		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		if (!(lo & (1 << 11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(lo & (1 << 12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (cpu_has_bts)
		ds_init_intel(c);

	/* Family 15 and family 6 are mutually exclusive. */
	switch (c->x86) {
	case 15:
		c->x86_cache_alignment = c->x86_clflush_size * 2;
		break;
	case 6:
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
		break;
	}
	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

	c->x86_max_cores = intel_num_cpu_cores(c);

	srat_detect_node();
}
|
|
|
|
/* Vendor descriptor: hooks early_init_intel()/init_intel() into the
   generic x86 CPU identification code for "GenuineIntel" parts. */
static struct cpu_dev intel_cpu_dev __cpuinitdata = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
	.c_early_init	= early_init_intel,
	.c_init		= init_intel,
};

/* Register the descriptor in the cpu_devs[] table at link time. */
cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);
|
|
|