2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* Local APIC handling, local APIC timers
|
|
|
|
*
|
|
|
|
* (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
|
|
|
|
*
|
|
|
|
* Fixes
|
|
|
|
* Maciej W. Rozycki : Bits for genuine 82489DX APICs;
|
|
|
|
* thanks to Eric Gilmore
|
|
|
|
* and Rolf G. Tews
|
|
|
|
* for testing these extensively.
|
|
|
|
* Maciej W. Rozycki : Various updates and fixes.
|
|
|
|
* Mikael Pettersson : Power Management for UP-APIC.
|
|
|
|
* Pavel Machek and
|
|
|
|
* Mikael Pettersson : PM converted to driver model.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/init.h>
|
|
|
|
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/delay.h>
|
|
|
|
#include <linux/bootmem.h>
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/mc146818rtc.h>
|
|
|
|
#include <linux/kernel_stat.h>
|
|
|
|
#include <linux/sysdev.h>
|
2006-12-07 01:14:01 +00:00
|
|
|
#include <linux/ioport.h>
|
2008-08-24 09:01:52 +00:00
|
|
|
#include <linux/cpu.h>
|
2007-10-12 21:04:07 +00:00
|
|
|
#include <linux/clockchips.h>
|
2008-01-30 12:30:18 +00:00
|
|
|
#include <linux/acpi_pmtmr.h>
|
2008-01-30 12:32:35 +00:00
|
|
|
#include <linux/module.h>
|
2008-08-24 09:01:52 +00:00
|
|
|
#include <linux/dmi.h>
|
2008-07-10 18:16:58 +00:00
|
|
|
#include <linux/dmar.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
#include <asm/atomic.h>
|
|
|
|
#include <asm/smp.h>
|
|
|
|
#include <asm/mtrr.h>
|
|
|
|
#include <asm/mpspec.h>
|
2008-08-20 03:50:36 +00:00
|
|
|
#include <asm/desc.h>
|
2008-08-24 09:01:52 +00:00
|
|
|
#include <asm/arch_hooks.h>
|
2008-01-30 12:32:35 +00:00
|
|
|
#include <asm/hpet.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <asm/pgalloc.h>
|
2008-08-24 09:01:52 +00:00
|
|
|
#include <asm/i8253.h>
|
2005-05-17 04:53:34 +00:00
|
|
|
#include <asm/nmi.h>
|
2006-01-11 21:44:36 +00:00
|
|
|
#include <asm/idle.h>
|
2006-02-03 20:50:50 +00:00
|
|
|
#include <asm/proto.h>
|
|
|
|
#include <asm/timex.h>
|
2006-09-26 08:52:32 +00:00
|
|
|
#include <asm/apic.h>
|
2008-07-10 18:16:58 +00:00
|
|
|
#include <asm/i8259.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-03-25 21:10:46 +00:00
|
|
|
#include <mach_apic.h>
|
2008-08-24 09:01:52 +00:00
|
|
|
#include <mach_apicdef.h>
|
|
|
|
#include <mach_ipi.h>
|
2008-03-25 16:28:56 +00:00
|
|
|
|
2008-08-24 09:01:42 +00:00
|
|
|
/*
|
|
|
|
* Sanity check
|
|
|
|
*/
|
|
|
|
#if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F)
|
|
|
|
# error SPURIOUS_APIC_VECTOR definition error
|
|
|
|
#endif
|
|
|
|
|
2008-08-24 09:01:46 +00:00
|
|
|
#ifdef CONFIG_X86_32
|
|
|
|
/*
|
|
|
|
* Knob to control our willingness to enable the local APIC.
|
|
|
|
*
|
|
|
|
* +1=force-enable
|
|
|
|
*/
|
|
|
|
static int force_enable_local_apic;
|
|
|
|
/*
|
|
|
|
* APIC command line parameters
|
|
|
|
*/
|
|
|
|
/*
 * "lapic" kernel command line option: force-enable the local APIC
 * even if the BIOS left it disabled. The option takes no value;
 * its mere presence sets the flag.
 */
static int __init parse_lapic(char *arg)
{
	force_enable_local_apic = 1;
	return 0;
}
early_param("lapic", parse_lapic);
|
2008-08-24 09:01:49 +00:00
|
|
|
/* Local APIC was disabled by the BIOS and enabled by the kernel */
|
|
|
|
static int enabled_via_apicbase;
|
|
|
|
|
2008-08-24 09:01:46 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef CONFIG_X86_64
|
2007-10-12 21:04:23 +00:00
|
|
|
/* Use the ACPI PM timer (instead of the TSC) for APIC timer calibration */
static int apic_calibrate_pmtmr __initdata;

/*
 * "apicpmtimer" command line option: calibrate the APIC timer against
 * the PM timer and disable TSC use (notsc_setup), since a box that
 * needs PM-timer calibration presumably has an unreliable TSC.
 */
static __init int setup_apicpmtimer(char *s)
{
	apic_calibrate_pmtmr = 1;
	notsc_setup(NULL);
	return 0;
}
__setup("apicpmtimer", setup_apicpmtimer);
|
|
|
|
#endif
|
|
|
|
|
2008-08-24 09:01:47 +00:00
|
|
|
#ifdef CONFIG_X86_64
|
|
|
|
#define HAVE_X2APIC
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef HAVE_X2APIC
|
x64, x2apic/intr-remap: IO-APIC support for interrupt-remapping
IO-APIC support in the presence of interrupt-remapping infrastructure.
IO-APIC RTE will be programmed with interrupt-remapping table entry(IRTE)
index and the IRTE will contain information about the vector, cpu destination,
trigger mode etc, which traditionally was present in the IO-APIC RTE.
Introduce a new irq_chip for cleaner irq migration (in the process
context as opposed to the current irq migration in the context of an interrupt.
interrupt-remapping infrastructure will help us achieve this cleanly).
For edge triggered, irq migration is a simple atomic update(of vector
and cpu destination) of IRTE and flush the hardware cache.
For level triggered, we need to modify the io-apic RTE aswell with the update
vector information, along with modifying IRTE with vector and cpu destination.
So irq migration for level triggered is little bit more complex compared to
edge triggered migration. But the good news is, we use the same algorithm
for level triggered migration as we have today, only difference being,
we now initiate the irq migration from process context instead of the
interrupt context.
In future, when we do a directed EOI (combined with cpu EOI broadcast
suppression) to the IO-APIC, level triggered irq migration will also be
as simple as edge triggered migration and we can do the irq migration
with a simple atomic update to IO-APIC RTE.
TBD: some tests/changes needed in the presence of fixup_irqs() for
level triggered irq migration.
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: akpm@linux-foundation.org
Cc: arjan@linux.intel.com
Cc: andi@firstfloor.org
Cc: ebiederm@xmission.com
Cc: jbarnes@virtuousgeek.org
Cc: steiner@sgi.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2008-07-10 18:16:56 +00:00
|
|
|
int x2apic;
|
2008-07-10 18:16:58 +00:00
|
|
|
/* x2apic enabled before OS handover */
|
|
|
|
int x2apic_preenabled;
|
2008-08-24 09:01:47 +00:00
|
|
|
int disable_x2apic;
|
|
|
|
/*
 * "nox2apic" command line option: keep the APIC in xAPIC mode and
 * clear the x2APIC CPU capability bit so later feature checks fail.
 */
static __init int setup_nox2apic(char *str)
{
	disable_x2apic = 1;
	setup_clear_cpu_cap(X86_FEATURE_X2APIC);
	return 0;
}
early_param("nox2apic", setup_nox2apic);
|
|
|
|
#endif
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-08-24 09:01:46 +00:00
|
|
|
unsigned long mp_lapic_addr;
|
|
|
|
int disable_apic;
|
|
|
|
/* Disable local APIC timer from the kernel commandline or via dmi quirk */
|
|
|
|
static int disable_apic_timer __cpuinitdata;
|
2008-01-30 12:32:35 +00:00
|
|
|
/* Local APIC timer works in C2 */
|
2007-03-23 18:32:31 +00:00
|
|
|
int local_apic_timer_c2_ok;
|
|
|
|
EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
|
|
|
|
|
2008-08-20 03:50:36 +00:00
|
|
|
int first_system_vector = 0xfe;
|
|
|
|
|
|
|
|
char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE};
|
|
|
|
|
2008-01-30 12:32:35 +00:00
|
|
|
/*
|
|
|
|
* Debug level, exported for io_apic.c
|
|
|
|
*/
|
2008-07-14 17:44:51 +00:00
|
|
|
unsigned int apic_verbosity;
|
2008-01-30 12:32:35 +00:00
|
|
|
|
2008-08-24 09:01:43 +00:00
|
|
|
int pic_mode;
|
|
|
|
|
2008-05-19 15:47:03 +00:00
|
|
|
/* Have we found an MP table */
|
|
|
|
int smp_found_config;
|
|
|
|
|
2006-12-07 01:14:01 +00:00
|
|
|
static struct resource lapic_resource = {
|
|
|
|
.name = "Local APIC",
|
|
|
|
.flags = IORESOURCE_MEM | IORESOURCE_BUSY,
|
|
|
|
};
|
|
|
|
|
2007-10-12 21:04:06 +00:00
|
|
|
static unsigned int calibration_result;
|
|
|
|
|
2007-10-12 21:04:07 +00:00
|
|
|
static int lapic_next_event(unsigned long delta,
|
|
|
|
struct clock_event_device *evt);
|
|
|
|
static void lapic_timer_setup(enum clock_event_mode mode,
|
|
|
|
struct clock_event_device *evt);
|
|
|
|
static void lapic_timer_broadcast(cpumask_t mask);
|
2008-01-30 12:30:20 +00:00
|
|
|
static void apic_pm_activate(void);
|
2007-10-12 21:04:07 +00:00
|
|
|
|
2008-08-16 19:21:53 +00:00
|
|
|
/*
 * The local apic timer can be used for any function which is CPU local.
 *
 * Template clock event device for the local APIC timer. Each CPU gets
 * a per-cpu copy (lapic_events below); FEAT_DUMMY is cleared at boot
 * once calibration succeeds and the lapic timer is usable for real.
 * mult is filled in by calibrate_APIC_clock().
 */
static struct clock_event_device lapic_clockevent = {
	.name		= "lapic",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
			| CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY,
	.shift		= 32,
	.set_mode	= lapic_timer_setup,
	.set_next_event	= lapic_next_event,
	.broadcast	= lapic_timer_broadcast,
	.rating		= 100,
	.irq		= -1,
};
static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
|
|
|
|
|
2008-01-30 12:33:17 +00:00
|
|
|
static unsigned long apic_phys;
|
|
|
|
|
2008-01-30 12:30:20 +00:00
|
|
|
/*
 * Get the LAPIC version: read the version register and extract the
 * version field.
 */
static inline int lapic_get_version(void)
{
	return GET_APIC_VERSION(apic_read(APIC_LVR));
}
|
|
|
|
|
2008-01-30 12:30:20 +00:00
|
|
|
/*
 * Check, if the APIC is integrated or a separate chip (82489DX).
 * 64-bit CPUs always have an integrated APIC; on 32-bit the version
 * register has to be consulted.
 */
static inline int lapic_is_integrated(void)
{
#ifdef CONFIG_X86_64
	return 1;
#else
	return APIC_INTEGRATED(lapic_get_version());
#endif
}
|
|
|
|
|
|
|
|
/*
 * Check, whether this is a modern or a first generation APIC
 * (version >= 0x14). AMD parts report old version numbers even on
 * current silicon, so family >= 0xf is treated as modern regardless
 * of the reported version.
 */
static int modern_apic(void)
{
	/* AMD systems use old APIC versions, so check the CPU */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
	    boot_cpu_data.x86 >= 0xf)
		return 1;
	return lapic_get_version() >= 0x14;
}
|
|
|
|
|
2008-08-16 19:21:53 +00:00
|
|
|
/*
 * Paravirt kernels also might be using these below ops. So we still
 * use generic apic_read()/apic_write(), which might be pointing to different
 * ops in PARAVIRT case.
 */

/*
 * Busy-wait until the delivery-status bit in the ICR clears, i.e.
 * until the previously sent IPI has been dispatched.
 *
 * NOTE: this can spin forever if the IPI is never dispatched (e.g.
 * target CPU wedged in an NMI handler); safe_xapic_wait_icr_idle()
 * below is the bounded variant.
 */
void xapic_wait_icr_idle(void)
{
	while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
		cpu_relax();
}
|
|
|
|
|
2008-07-10 18:16:49 +00:00
|
|
|
u32 safe_xapic_wait_icr_idle(void)
|
[PATCH] x86-64: safe_apic_wait_icr_idle - x86_64
apic_wait_icr_idle looks like this:
static __inline__ void apic_wait_icr_idle(void)
{
while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
cpu_relax();
}
The busy loop in this function would not be problematic if the
corresponding status bit in the ICR were always updated, but that does
not seem to be the case under certain crash scenarios. Kdump uses an IPI
to stop the other CPUs in the event of a crash, but when any of the
other CPUs are locked-up inside the NMI handler the CPU that sends the
IPI will end up looping forever in the ICR check, effectively
hard-locking the whole system.
Quoting from Intel's "MultiProcessor Specification" (Version 1.4), B-3:
"A local APIC unit indicates successful dispatch of an IPI by
resetting the Delivery Status bit in the Interrupt Command
Register (ICR). The operating system polls the delivery status
bit after sending an INIT or STARTUP IPI until the command has
been dispatched.
A period of 20 microseconds should be sufficient for IPI dispatch
to complete under normal operating conditions. If the IPI is not
successfully dispatched, the operating system can abort the
command. Alternatively, the operating system can retry the IPI by
writing the lower 32-bit double word of the ICR. This “time-out”
mechanism can be implemented through an external interrupt, if
interrupts are enabled on the processor, or through execution of
an instruction or time-stamp counter spin loop."
Intel's documentation suggests the implementation of a time-out
mechanism, which, by the way, is already being open-coded in some parts
of the kernel that tinker with ICR.
Create a apic_wait_icr_idle replacement that implements the time-out
mechanism and that can be used to solve the aforementioned problem.
AK: moved both functions out of line
AK: Added improved loop from Keith Owens
Signed-off-by: Fernando Luis Vazquez Cao <fernando@oss.ntt.co.jp>
Signed-off-by: Andi Kleen <ak@suse.de>
2007-05-02 17:27:17 +00:00
|
|
|
{
|
2008-01-30 12:30:15 +00:00
|
|
|
u32 send_status;
|
[PATCH] x86-64: safe_apic_wait_icr_idle - x86_64
apic_wait_icr_idle looks like this:
static __inline__ void apic_wait_icr_idle(void)
{
while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
cpu_relax();
}
The busy loop in this function would not be problematic if the
corresponding status bit in the ICR were always updated, but that does
not seem to be the case under certain crash scenarios. Kdump uses an IPI
to stop the other CPUs in the event of a crash, but when any of the
other CPUs are locked-up inside the NMI handler the CPU that sends the
IPI will end up looping forever in the ICR check, effectively
hard-locking the whole system.
Quoting from Intel's "MultiProcessor Specification" (Version 1.4), B-3:
"A local APIC unit indicates successful dispatch of an IPI by
resetting the Delivery Status bit in the Interrupt Command
Register (ICR). The operating system polls the delivery status
bit after sending an INIT or STARTUP IPI until the command has
been dispatched.
A period of 20 microseconds should be sufficient for IPI dispatch
to complete under normal operating conditions. If the IPI is not
successfully dispatched, the operating system can abort the
command. Alternatively, the operating system can retry the IPI by
writing the lower 32-bit double word of the ICR. This “time-out”
mechanism can be implemented through an external interrupt, if
interrupts are enabled on the processor, or through execution of
an instruction or time-stamp counter spin loop."
Intel's documentation suggests the implementation of a time-out
mechanism, which, by the way, is already being open-coded in some parts
of the kernel that tinker with ICR.
Create a apic_wait_icr_idle replacement that implements the time-out
mechanism and that can be used to solve the aforementioned problem.
AK: moved both functions out of line
AK: Added improved loop from Keith Owens
Signed-off-by: Fernando Luis Vazquez Cao <fernando@oss.ntt.co.jp>
Signed-off-by: Andi Kleen <ak@suse.de>
2007-05-02 17:27:17 +00:00
|
|
|
int timeout;
|
|
|
|
|
|
|
|
timeout = 0;
|
|
|
|
do {
|
|
|
|
send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
|
|
|
|
if (!send_status)
|
|
|
|
break;
|
|
|
|
udelay(100);
|
|
|
|
} while (timeout++ < 1000);
|
|
|
|
|
|
|
|
return send_status;
|
|
|
|
}
|
|
|
|
|
2008-07-10 18:16:49 +00:00
|
|
|
/*
 * Write the interrupt command register in xAPIC mode: the destination
 * must be set up in ICR2 first, because the write to the low word
 * (APIC_ICR) is what actually triggers the IPI.
 */
void xapic_icr_write(u32 low, u32 id)
{
	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
	apic_write(APIC_ICR, low);
}
|
|
|
|
|
|
|
|
u64 xapic_icr_read(void)
|
|
|
|
{
|
|
|
|
u32 icr1, icr2;
|
|
|
|
|
|
|
|
icr2 = apic_read(APIC_ICR2);
|
|
|
|
icr1 = apic_read(APIC_ICR);
|
|
|
|
|
2008-08-16 19:21:55 +00:00
|
|
|
return icr1 | ((u64)icr2 << 32);
|
2008-07-10 18:16:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Default APIC access methods: MMIO-based xAPIC operations. apic_ops
 * is switched to x2apic_ops elsewhere when x2APIC mode is enabled.
 */
static struct apic_ops xapic_ops = {
	.read = native_apic_mem_read,
	.write = native_apic_mem_write,
	.icr_read = xapic_icr_read,
	.icr_write = xapic_icr_write,
	.wait_icr_idle = xapic_wait_icr_idle,
	.safe_wait_icr_idle = safe_xapic_wait_icr_idle,
};

struct apic_ops __read_mostly *apic_ops = &xapic_ops;
EXPORT_SYMBOL_GPL(apic_ops);
|
|
|
|
|
2008-08-24 09:01:47 +00:00
|
|
|
#ifdef HAVE_X2APIC
|
2008-07-10 18:16:52 +00:00
|
|
|
/*
 * In x2APIC mode ICR accesses are MSR based and there is no
 * delivery-status bit to poll, so waiting is a no-op.
 */
static void x2apic_wait_icr_idle(void)
{
	/* no need to wait for icr idle in x2apic */
	return;
}
|
|
|
|
|
|
|
|
/*
 * Bounded-wait counterpart for x2APIC: nothing to wait for, always
 * report success (0 == not busy).
 */
static u32 safe_x2apic_wait_icr_idle(void)
{
	/* no need to wait for icr idle in x2apic */
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Write the ICR in x2APIC mode: one 64-bit MSR write carries both the
 * destination id (upper 32 bits) and the command (lower 32 bits).
 */
void x2apic_icr_write(u32 low, u32 id)
{
	wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
}
|
|
|
|
|
|
|
|
/*
 * Read the full 64-bit ICR in x2APIC mode with a single MSR read.
 */
u64 x2apic_icr_read(void)
{
	unsigned long val;

	rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
	return val;
}
|
|
|
|
|
|
|
|
/* MSR-based APIC access methods used when x2APIC mode is active */
static struct apic_ops x2apic_ops = {
	.read = native_apic_msr_read,
	.write = native_apic_msr_write,
	.icr_read = x2apic_icr_read,
	.icr_write = x2apic_icr_write,
	.wait_icr_idle = x2apic_wait_icr_idle,
	.safe_wait_icr_idle = safe_x2apic_wait_icr_idle,
};
|
2008-08-24 09:01:47 +00:00
|
|
|
#endif
|
2008-07-10 18:16:52 +00:00
|
|
|
|
2008-01-30 12:30:20 +00:00
|
|
|
/**
 * enable_NMI_through_LVT0 - enable NMI through local vector table 0
 *
 * Programs LVT0 to deliver incoming interrupts as NMI, unmasked.
 * Non-integrated 82489DX APICs (32-bit only) additionally need the
 * level-trigger bit set.
 */
void __cpuinit enable_NMI_through_LVT0(void)
{
	unsigned int v;

	/* unmask and set to NMI */
	v = APIC_DM_NMI;

	/* Level triggered for 82489DX (32bit mode) */
	if (!lapic_is_integrated())
		v |= APIC_LVT_LEVEL_TRIGGER;

	apic_write(APIC_LVT0, v);
}
|
|
|
|
|
2008-08-24 09:01:40 +00:00
|
|
|
#ifdef CONFIG_X86_32
/**
 * get_physical_broadcast - Get number of physical broadcast IDs
 *
 * Modern APICs use 8-bit physical IDs (broadcast 0xff); first
 * generation parts only have 4-bit IDs (broadcast 0xf).
 */
int get_physical_broadcast(void)
{
	if (modern_apic())
		return 0xff;

	return 0xf;
}
#endif
|
|
|
|
|
2008-01-30 12:30:20 +00:00
|
|
|
/**
|
|
|
|
* lapic_get_maxlvt - get the maximum number of local vector table entries
|
|
|
|
*/
|
2008-01-30 12:30:14 +00:00
|
|
|
int lapic_get_maxlvt(void)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2008-07-24 11:52:28 +00:00
|
|
|
unsigned int v;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
v = apic_read(APIC_LVR);
|
2008-07-24 11:52:28 +00:00
|
|
|
/*
|
|
|
|
* - we always have APIC integrated on 64bit mode
|
|
|
|
* - 82489DXs do not report # of LVT entries
|
|
|
|
*/
|
|
|
|
return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2008-08-16 19:21:53 +00:00
|
|
|
/*
|
|
|
|
* Local APIC timer
|
|
|
|
*/
|
|
|
|
|
2008-08-18 16:45:55 +00:00
|
|
|
/*
 * Clock divisor: scales the count written to APIC_TMICT and the
 * calibration result. 1 on 64-bit, 16 on 32-bit.
 *
 * Fix: the guard was misspelled "CONFG_X86_64", which made the 64-bit
 * branch dead so APIC_DIVISOR was silently 16 on both architectures.
 * CONFIG_X86_64 is the spelling used everywhere else in this file.
 */
#ifdef CONFIG_X86_64
#define APIC_DIVISOR 1
#else
#define APIC_DIVISOR 16
#endif
|
2008-08-15 11:51:21 +00:00
|
|
|
|
2008-01-30 12:30:20 +00:00
|
|
|
/*
 * This function sets up the local APIC timer, with a timeout of
 * 'clocks' APIC bus clock. During calibration we actually call
 * this function twice on the boot CPU, once with a bogus timeout
 * value, second time for real. The other (noncalibrating) CPUs
 * call this function only once, with the real, calibrated value.
 *
 * We do reads before writes even if unnecessary, to get around the
 * P5 APIC double write bug.
 *
 * @clocks: initial count (divided by APIC_DIVISOR before programming)
 * @oneshot: non-zero for one-shot mode, zero for periodic
 * @irqen:  zero keeps the timer LVT entry masked (used during calibration)
 */
static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
{
	unsigned int lvtt_value, tmp_value;

	lvtt_value = LOCAL_TIMER_VECTOR;
	if (!oneshot)
		lvtt_value |= APIC_LVT_TIMER_PERIODIC;
	/* non-integrated 82489DX needs the timer base explicitly selected */
	if (!lapic_is_integrated())
		lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);

	if (!irqen)
		lvtt_value |= APIC_LVT_MASKED;

	apic_write(APIC_LVTT, lvtt_value);

	/*
	 * Divide PICLK by 16
	 */
	tmp_value = apic_read(APIC_TDCR);
	apic_write(APIC_TDCR,
		(tmp_value & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) |
		APIC_TDR_DIV_16);

	/* one-shot mode gets its count later via lapic_next_event() */
	if (!oneshot)
		apic_write(APIC_TMICT, clocks / APIC_DIVISOR);
}
|
|
|
|
|
2008-01-30 12:30:20 +00:00
|
|
|
/*
 * Setup extended LVT, AMD specific (K8, family 10h)
 *
 * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
 * MCE interrupts are supported. Thus MCE offset must be set to 0.
 *
 * If mask=1, the LVT entry does not generate interrupts while mask=0
 * enables the vector. See also the BKDGs.
 */

#define APIC_EILVT_LVTOFF_MCE 0
#define APIC_EILVT_LVTOFF_IBS 1

/*
 * Program one extended LVT entry. Each entry is 16 bytes apart
 * starting at APIC_EILVT0; layout is mask<<16 | msg_type<<8 | vector.
 */
static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
{
	unsigned long reg = (lvt_off << 4) + APIC_EILVT0;
	unsigned int v = (mask << 16) | (msg_type << 8) | vector;

	apic_write(reg, v);
}
|
|
|
|
|
2008-01-30 12:30:40 +00:00
|
|
|
/*
 * Program the extended LVT entry for MCE (threshold) interrupts.
 * Returns the LVT offset used, so callers can record it.
 */
u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask)
{
	setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask);
	return APIC_EILVT_LVTOFF_MCE;
}
|
|
|
|
|
|
|
|
/*
 * Program the extended LVT entry for IBS (instruction based sampling)
 * interrupts. Returns the LVT offset used. Exported for the oprofile
 * (or similar) module users.
 */
u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
{
	setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
	return APIC_EILVT_LVTOFF_IBS;
}
EXPORT_SYMBOL_GPL(setup_APIC_eilvt_ibs);
|
2008-01-30 12:30:40 +00:00
|
|
|
|
2008-01-30 12:30:20 +00:00
|
|
|
/*
 * Program the next event, relative to now. clockevents callback for
 * one-shot mode: 'delta' is already in timer ticks, write it straight
 * into the initial-count register.
 */
static int lapic_next_event(unsigned long delta,
			    struct clock_event_device *evt)
{
	apic_write(APIC_TMICT, delta);
	return 0;
}
|
|
|
|
|
2008-01-30 12:30:20 +00:00
|
|
|
/*
 * Setup the lapic timer in periodic or oneshot mode. clockevents
 * set_mode callback; runs with interrupts disabled around the
 * register accesses.
 */
static void lapic_timer_setup(enum clock_event_mode mode,
			      struct clock_event_device *evt)
{
	unsigned long flags;
	unsigned int v;

	/* Lapic used as dummy for broadcast ? */
	if (evt->features & CLOCK_EVT_FEAT_DUMMY)
		return;

	local_irq_save(flags);

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		__setup_APIC_LVTT(calibration_result,
				  mode != CLOCK_EVT_MODE_PERIODIC, 1);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		/* mask the timer LVT entry to stop interrupts */
		v = apic_read(APIC_LVTT);
		v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
		apic_write(APIC_LVTT, v);
		break;
	case CLOCK_EVT_MODE_RESUME:
		/* Nothing to do here */
		break;
	}

	local_irq_restore(flags);
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
 * Local APIC timer broadcast function: kick the timer vector on the
 * CPUs in 'mask' via IPI. Only meaningful on SMP; a no-op on UP.
 */
static void lapic_timer_broadcast(cpumask_t mask)
{
#ifdef CONFIG_SMP
	send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
#endif
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-01-30 12:30:20 +00:00
|
|
|
/*
 * Setup the local APIC timer for this CPU. Copy the initialized values
 * of the boot CPU and register the clock event in the framework.
 */
static void __cpuinit setup_APIC_timer(void)
{
	struct clock_event_device *levt = &__get_cpu_var(lapic_events);

	/* clone the (already calibrated) template, bind it to this CPU */
	memcpy(levt, &lapic_clockevent, sizeof(*levt));
	levt->cpumask = cpumask_of_cpu(smp_processor_id());

	clockevents_register_device(levt);
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-01-30 12:30:20 +00:00
|
|
|
/*
 * In this function we calibrate APIC bus clocks to the external
 * timer. Unfortunately we cannot use jiffies and the timer irq
 * to calibrate, since some later bootup code depends on getting
 * the first irq? Ugh.
 *
 * We want to do the calibration only once since we
 * want to have local timer irqs syncron. CPUs connected
 * by the same APIC bus have the very same bus frequency.
 * And we want to have irqs off anyways, no accidental
 * APIC irq that way.
 */

#define TICK_COUNT 100000000

/*
 * Measure the APIC timer frequency against the PM timer (if requested
 * via "apicpmtimer") or the TSC, fill in the clockevent mult/min/max
 * fields and calibration_result.
 *
 * Returns 0 on success, -1 if the measured frequency is implausibly low.
 */
static int __init calibrate_APIC_clock(void)
{
	unsigned apic, apic_start;
	unsigned long tsc, tsc_start;
	int result;

	local_irq_disable();

	/*
	 * Put whatever arbitrary (but long enough) timeout
	 * value into the APIC clock, we just want to get the
	 * counter running for calibration.
	 *
	 * No interrupt enable !
	 */
	__setup_APIC_LVTT(250000000, 0, 0);

	apic_start = apic_read(APIC_TMCCT);
#ifdef CONFIG_X86_PM_TIMER
	if (apic_calibrate_pmtmr && pmtmr_ioport) {
		pmtimer_wait(5000);  /* 5ms wait */
		apic = apic_read(APIC_TMCCT);
		/* counter counts down: elapsed ticks = start - now */
		result = (apic_start - apic) * 1000L / 5;
	} else
#endif
	{
		rdtscll(tsc_start);

		/* sample APIC count and TSC until either advances TICK_COUNT */
		do {
			apic = apic_read(APIC_TMCCT);
			rdtscll(tsc);
		} while ((tsc - tsc_start) < TICK_COUNT &&
				(apic_start - apic) < TICK_COUNT);

		result = (apic_start - apic) * 1000L * tsc_khz /
					(tsc - tsc_start);
	}

	local_irq_enable();

	printk(KERN_DEBUG "APIC timer calibration result %d\n", result);

	printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n",
		result / 1000 / 1000, result / 1000 % 1000);

	/* Calculate the scaled math multiplication factor */
	lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC,
				       lapic_clockevent.shift);
	lapic_clockevent.max_delta_ns =
		clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
	lapic_clockevent.min_delta_ns =
		clockevent_delta2ns(0xF, &lapic_clockevent);

	/* ticks per jiffy, undoing the divisor used when programming TMICT */
	calibration_result = (result * APIC_DIVISOR) / HZ;

	/*
	 * Do a sanity check on the APIC calibration result
	 */
	if (calibration_result < (1000000 / HZ)) {
		printk(KERN_WARNING
			"APIC frequency too slow, disabling apic timer\n");
		return -1;
	}

	return 0;
}
|
|
|
|
|
2008-01-30 12:32:35 +00:00
|
|
|
/*
 * Setup the boot APIC
 *
 * Calibrate and verify the result. On failure (or when the timer is
 * disabled) the lapic clockevent stays FEAT_DUMMY so the broadcast
 * mechanism is used instead on SMP.
 */
void __init setup_boot_APIC_clock(void)
{
	/*
	 * The local apic timer can be disabled via the kernel
	 * commandline or from the CPU detection code. Register the lapic
	 * timer as a dummy clock event source on SMP systems, so the
	 * broadcast mechanism is used. On UP systems simply ignore it.
	 */
	if (disable_apic_timer) {
		printk(KERN_INFO "Disabling APIC timer\n");
		/* No broadcast on UP ! */
		if (num_possible_cpus() > 1) {
			lapic_clockevent.mult = 1;
			setup_APIC_timer();
		}
		return;
	}

	apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
		    "calibrating APIC timer ...\n");

	/* calibration failed: keep the dummy clockevent (SMP only) */
	if (calibrate_APIC_clock()) {
		/* No broadcast on UP ! */
		if (num_possible_cpus() > 1)
			setup_APIC_timer();
		return;
	}

	/*
	 * If nmi_watchdog is set to IO_APIC, we need the
	 * PIT/HPET going. Otherwise register lapic as a dummy
	 * device.
	 */
	if (nmi_watchdog != NMI_IO_APIC)
		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
	else
		printk(KERN_WARNING "APIC timer registered as dummy,"
			" due to nmi_watchdog=%d!\n", nmi_watchdog);

	/* Setup the lapic or request the broadcast */
	setup_APIC_timer();
}
|
|
|
|
|
|
|
|
/*
 * Secondary CPUs register their per-cpu copy of the (already
 * calibrated) lapic clockevent during bringup.
 */
void __cpuinit setup_secondary_APIC_clock(void)
{
	setup_APIC_timer();
}
|
|
|
|
|
|
|
|
/*
 * The guts of the apic timer interrupt: account the interrupt and
 * hand off to this CPU's clockevent handler.
 */
static void local_apic_timer_interrupt(void)
{
	int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(lapic_events, cpu);

	/*
	 * Normally we should not be here till LAPIC has been initialized but
	 * in some cases like kdump, its possible that there is a pending LAPIC
	 * timer interrupt from previous kernel's context and is delivered in
	 * new kernel the moment interrupts are enabled.
	 *
	 * Interrupts are enabled early and LAPIC is setup much later, hence
	 * its possible that when we get here evt->event_handler is NULL.
	 * Check for event_handler being NULL and discard the interrupt as
	 * spurious.
	 */
	if (!evt->event_handler) {
		printk(KERN_WARNING
		       "Spurious LAPIC timer interrupt on cpu %d\n", cpu);
		/* Switch it off */
		lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
		return;
	}

	/*
	 * the NMI deadlock-detector uses this.
	 */
#ifdef CONFIG_X86_64
	add_pda(apic_timer_irqs, 1);
#else
	per_cpu(irq_stat, cpu).apic_timer_irqs++;
#endif

	evt->event_handler(evt);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Local APIC timer interrupt. This is the most natural way for doing
|
|
|
|
* local interrupts, but local timer interrupts can be emulated by
|
|
|
|
* broadcast interrupts too. [in case the hw doesn't support APIC timers]
|
|
|
|
*
|
|
|
|
* [ if a single-CPU system runs an SMP kernel then we call the local
|
|
|
|
* interrupt as well. Thus we cannot inline the local irq ... ]
|
|
|
|
*/
|
|
|
|
void smp_apic_timer_interrupt(struct pt_regs *regs)
{
	/* Save/replace the irq regs pointer so handlers see this frame */
	struct pt_regs *old_regs = set_irq_regs(regs);

	/*
	 * NOTE! We'd better ACK the irq immediately,
	 * because timer handling can be slow.
	 */
	ack_APIC_irq();
	/*
	 * update_process_times() expects us to have done irq_enter().
	 * Besides, if we don't timer interrupts ignore the global
	 * interrupt lock, which is the WrongThing (tm) to do.
	 */
#ifdef CONFIG_X86_64
	/* Leave the idle-accounting state before doing irq work */
	exit_idle();
#endif
	irq_enter();
	local_apic_timer_interrupt();
	irq_exit();

	/* Restore the previously active irq regs pointer */
	set_irq_regs(old_regs);
}
|
|
|
|
|
|
|
|
/*
 * Changing the per-CPU profiling multiplier is not supported with the
 * local APIC timer, so always reject the request.
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Local APIC start and shutdown
|
|
|
|
*/
|
|
|
|
|
|
|
|
/**
|
|
|
|
* clear_local_APIC - shutdown the local APIC
|
|
|
|
*
|
|
|
|
* This is called, when a CPU is disabled and before rebooting, so the state of
|
|
|
|
* the local APIC has no dangling leftovers. Also used to cleanout any BIOS
|
|
|
|
* leftovers during boot.
|
|
|
|
*/
|
|
|
|
void clear_local_APIC(void)
|
|
|
|
{
|
2008-05-20 22:18:12 +00:00
|
|
|
int maxlvt;
|
2008-01-30 12:30:20 +00:00
|
|
|
u32 v;
|
|
|
|
|
2008-01-30 12:33:17 +00:00
|
|
|
/* APIC hasn't been mapped yet */
|
|
|
|
if (!apic_phys)
|
|
|
|
return;
|
|
|
|
|
|
|
|
maxlvt = lapic_get_maxlvt();
|
2008-01-30 12:30:20 +00:00
|
|
|
/*
|
|
|
|
* Masking an LVT entry can trigger a local APIC error
|
|
|
|
* if the vector is zero. Mask LVTERR first to prevent this.
|
|
|
|
*/
|
|
|
|
if (maxlvt >= 3) {
|
|
|
|
v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
|
|
|
|
apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Careful: we have to set masks only first to deassert
|
|
|
|
* any level-triggered sources.
|
|
|
|
*/
|
|
|
|
v = apic_read(APIC_LVTT);
|
|
|
|
apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
|
|
|
|
v = apic_read(APIC_LVT0);
|
|
|
|
apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
|
|
|
|
v = apic_read(APIC_LVT1);
|
|
|
|
apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
|
|
|
|
if (maxlvt >= 4) {
|
|
|
|
v = apic_read(APIC_LVTPC);
|
|
|
|
apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
|
|
|
|
}
|
|
|
|
|
2008-08-16 19:21:50 +00:00
|
|
|
/* lets not touch this if we didn't frob it */
|
|
|
|
#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(X86_MCE_INTEL)
|
|
|
|
if (maxlvt >= 5) {
|
|
|
|
v = apic_read(APIC_LVTTHMR);
|
|
|
|
apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
|
|
|
|
}
|
|
|
|
#endif
|
2008-01-30 12:30:20 +00:00
|
|
|
/*
|
|
|
|
* Clean APIC state for other OSs:
|
|
|
|
*/
|
|
|
|
apic_write(APIC_LVTT, APIC_LVT_MASKED);
|
|
|
|
apic_write(APIC_LVT0, APIC_LVT_MASKED);
|
|
|
|
apic_write(APIC_LVT1, APIC_LVT_MASKED);
|
|
|
|
if (maxlvt >= 3)
|
|
|
|
apic_write(APIC_LVTERR, APIC_LVT_MASKED);
|
|
|
|
if (maxlvt >= 4)
|
|
|
|
apic_write(APIC_LVTPC, APIC_LVT_MASKED);
|
2008-08-16 19:21:50 +00:00
|
|
|
|
|
|
|
/* Integrated APIC (!82489DX) ? */
|
|
|
|
if (lapic_is_integrated()) {
|
|
|
|
if (maxlvt > 3)
|
|
|
|
/* Clear ESR due to Pentium errata 3AP and 11AP */
|
|
|
|
apic_write(APIC_ESR, 0);
|
|
|
|
apic_read(APIC_ESR);
|
|
|
|
}
|
2008-01-30 12:30:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* disable_local_APIC - clear and disable the local APIC
|
|
|
|
*/
|
|
|
|
void disable_local_APIC(void)
{
	unsigned int value;

	/* First mask and clear all LVT entries */
	clear_local_APIC();

	/*
	 * Disable APIC (implies clearing of registers
	 * for 82489DX!).
	 */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_SPIV_APIC_ENABLED;
	apic_write(APIC_SPIV, value);

#ifdef CONFIG_X86_32
	/*
	 * When LAPIC was disabled by the BIOS and enabled by the kernel,
	 * restore the disabled state.
	 */
	if (enabled_via_apicbase) {
		unsigned int l, h;

		rdmsr(MSR_IA32_APICBASE, l, h);
		l &= ~MSR_IA32_APICBASE_ENABLE;
		wrmsr(MSR_IA32_APICBASE, l, h);
	}
#endif
}
|
|
|
|
|
2008-08-18 16:45:52 +00:00
|
|
|
/*
|
|
|
|
* If Linux enabled the LAPIC against the BIOS default disable it down before
|
|
|
|
* re-entering the BIOS on shutdown. Otherwise the BIOS may get confused and
|
|
|
|
* not power-off. Additionally clear all LVT entries before disable_local_APIC
|
|
|
|
* for the case where Linux didn't enable the LAPIC.
|
|
|
|
*/
|
2008-01-30 12:30:20 +00:00
|
|
|
void lapic_shutdown(void)
{
	unsigned long flags;

	/* Nothing to do if this CPU has no local APIC at all */
	if (!cpu_has_apic)
		return;

	/* The register sequence below must not be interrupted */
	local_irq_save(flags);

#ifdef CONFIG_X86_32
	/*
	 * If the kernel did not enable the LAPIC itself, only clear the
	 * LVT state and leave the BIOS-established enable state alone.
	 */
	if (!enabled_via_apicbase)
		clear_local_APIC();
	else
#endif
		disable_local_APIC();


	local_irq_restore(flags);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This is to verify that we're looking at a real local APIC.
|
|
|
|
* Check these against your board if the CPUs aren't getting
|
|
|
|
* started for no apparent reason.
|
|
|
|
*/
|
|
|
|
/*
 * Probe the mapped registers to verify they behave like a real local
 * APIC. Returns 1 if the APIC looks genuine, 0 otherwise.
 */
int __init verify_local_APIC(void)
{
	unsigned int reg0, reg1;

	/*
	 * The version register is read-only in a real APIC.
	 */
	reg0 = apic_read(APIC_LVR);
	apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0);
	apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
	reg1 = apic_read(APIC_LVR);
	apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1);

	/*
	 * The two version reads above should print the same
	 * numbers. If the second one is different, then we
	 * poke at a non-APIC.
	 */
	if (reg1 != reg0)
		return 0;

	/*
	 * Check if the version looks reasonably.
	 */
	reg1 = GET_APIC_VERSION(reg0);
	if (reg1 == 0x00 || reg1 == 0xff)
		return 0;
	reg1 = lapic_get_maxlvt();
	if (reg1 < 0x02 || reg1 == 0xff)
		return 0;

	/*
	 * The ID register is read/write in a real APIC.
	 */
	reg0 = apic_read(APIC_ID);
	apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
	/* Flip the writable ID bits, read them back, then restore */
	apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
	reg1 = apic_read(APIC_ID);
	apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
	apic_write(APIC_ID, reg0);
	if (reg1 != (reg0 ^ APIC_ID_MASK))
		return 0;

	/*
	 * The next two are just to see if we have sane values.
	 * They're only really relevant if we're in Virtual Wire
	 * compatibility mode, but most boxes are anymore.
	 */
	reg0 = apic_read(APIC_LVT0);
	apic_printk(APIC_DEBUG, "Getting LVT0: %x\n", reg0);
	reg1 = apic_read(APIC_LVT1);
	apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1);

	return 1;
}
|
|
|
|
|
2008-01-30 12:30:20 +00:00
|
|
|
/**
|
|
|
|
* sync_Arb_IDs - synchronize APIC bus arbitration IDs
|
|
|
|
*/
|
2005-04-16 22:20:36 +00:00
|
|
|
void __init sync_Arb_IDs(void)
{
	/*
	 * Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 And not
	 * needed on AMD.
	 */
	if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return;

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();

	apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
	/* Broadcast a level-triggered INIT to resynchronize arbitration IDs */
	apic_write(APIC_ICR, APIC_DEST_ALLINC |
			APIC_INT_LEVELTRIG | APIC_DM_INIT);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* An initial setup of the virtual wire mode.
|
|
|
|
*/
|
|
|
|
void __init init_bsp_APIC(void)
{
	unsigned int value;

	/*
	 * Don't do the setup now if we have a SMP BIOS as the
	 * through-I/O-APIC virtual wire mode might be active.
	 */
	if (smp_found_config || !cpu_has_apic)
		return;

	/*
	 * Do not trust the local APIC being empty at bootup.
	 */
	clear_local_APIC();

	/*
	 * Enable APIC.
	 */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_VECTOR_MASK;
	value |= APIC_SPIV_APIC_ENABLED;

#ifdef CONFIG_X86_32
	/* This bit is reserved on P4/Xeon and should be cleared */
	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
	    (boot_cpu_data.x86 == 15))
		value &= ~APIC_SPIV_FOCUS_DISABLED;
	else
#endif
		value |= APIC_SPIV_FOCUS_DISABLED;
	value |= SPURIOUS_APIC_VECTOR;
	apic_write(APIC_SPIV, value);

	/*
	 * Set up the virtual wire mode: LINT0 delivers ExtINT from the
	 * 8259A, LINT1 delivers NMI.
	 */
	apic_write(APIC_LVT0, APIC_DM_EXTINT);
	value = APIC_DM_NMI;
	if (!lapic_is_integrated())		/* 82489DX */
		value |= APIC_LVT_LEVEL_TRIGGER;
	apic_write(APIC_LVT1, value);
}
|
|
|
|
|
2008-08-18 16:45:54 +00:00
|
|
|
/*
 * Enable the local APIC Error Status Register and hook up the error
 * vector, unless the APIC is a discrete 82489DX (no ESR) or ESR use
 * is administratively disabled.
 *
 * BUGFIX: the old code tested "lapic_is_integrated() && !esr_disable"
 * and then re-tested esr_disable inside that branch - dead code - and
 * printed the misleading "No ESR for 82489DX." message when ESR was
 * merely disabled on an integrated APIC. Restructured with guard
 * clauses so each case prints the right message; hardware side effects
 * on the enabled path are unchanged.
 */
static void __cpuinit lapic_setup_esr(void)
{
	unsigned int oldvalue, value, maxlvt;

	if (!lapic_is_integrated()) {
		printk(KERN_INFO "No ESR for 82489DX.\n");
		return;
	}

	if (esr_disable) {
		/*
		 * Something untraceable is creating bad interrupts on
		 * secondary quads ... for the moment, just leave the
		 * ESR disabled - we can't do anything useful with the
		 * errors anyway - mbligh
		 */
		printk(KERN_INFO "Leaving ESR disabled.\n");
		return;
	}

	/* !82489DX */
	maxlvt = lapic_get_maxlvt();
	if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
		apic_write(APIC_ESR, 0);
	oldvalue = apic_read(APIC_ESR);

	/* enables sending errors */
	value = ERROR_APIC_VECTOR;
	apic_write(APIC_LVTERR, value);

	/*
	 * spec says clear errors after enabling vector.
	 */
	if (maxlvt > 3)
		apic_write(APIC_ESR, 0);
	value = apic_read(APIC_ESR);
	if (value != oldvalue)
		apic_printk(APIC_VERBOSE, "ESR value before enabling "
			"vector: 0x%08lx after: 0x%08lx\n",
			oldvalue, value);
}
|
|
|
|
|
|
|
|
|
2008-01-30 12:30:20 +00:00
|
|
|
/**
|
|
|
|
* setup_local_APIC - setup the local APIC
|
|
|
|
*/
|
|
|
|
void __cpuinit setup_local_APIC(void)
{
	unsigned int value;
	int i, j;

#ifdef CONFIG_X86_32
	/* Pound the ESR really hard over the head with a big hammer - mbligh */
	if (esr_disable) {
		apic_write(APIC_ESR, 0);
		apic_write(APIC_ESR, 0);
		apic_write(APIC_ESR, 0);
		apic_write(APIC_ESR, 0);
	}
#endif

	preempt_disable();

	/*
	 * Double-check whether this APIC is really registered.
	 * This is meaningless in clustered apic mode, so we skip it.
	 */
	if (!apic_id_registered())
		BUG();

	/*
	 * Intel recommends to set DFR, LDR and TPR before enabling
	 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
	 * document number 292116).  So here it goes...
	 */
	init_apic_ldr();

	/*
	 * Set Task Priority to 'accept all'. We never change this
	 * later on.
	 */
	value = apic_read(APIC_TASKPRI);
	value &= ~APIC_TPRI_MASK;
	apic_write(APIC_TASKPRI, value);

	/*
	 * After a crash, we no longer service the interrupts and a pending
	 * interrupt from previous kernel might still have ISR bit set.
	 *
	 * Most probably by now CPU has serviced that pending interrupt and
	 * it might not have done the ack_APIC_irq() because it thought,
	 * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it
	 * does not clear the ISR bit and cpu thinks it has already serivced
	 * the interrupt. Hence a vector might get locked. It was noticed
	 * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
	 */
	for (i = APIC_ISR_NR - 1; i >= 0; i--) {
		value = apic_read(APIC_ISR + i*0x10);
		for (j = 31; j >= 0; j--) {
			if (value & (1<<j))
				ack_APIC_irq();
		}
	}

	/*
	 * Now that we are all set up, enable the APIC
	 */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_VECTOR_MASK;
	/*
	 * Enable APIC
	 */
	value |= APIC_SPIV_APIC_ENABLED;

#ifdef CONFIG_X86_32
	/*
	 * Some unknown Intel IO/APIC (or APIC) errata is biting us with
	 * certain networking cards. If high frequency interrupts are
	 * happening on a particular IOAPIC pin, plus the IOAPIC routing
	 * entry is masked/unmasked at a high rate as well then sooner or
	 * later IOAPIC line gets 'stuck', no more interrupts are received
	 * from the device. If focus CPU is disabled then the hang goes
	 * away, oh well :-(
	 *
	 * [ This bug can be reproduced easily with a level-triggered
	 *   PCI Ne2000 networking cards and PII/PIII processors, dual
	 *   BX chipset. ]
	 */
	/*
	 * Actually disabling the focus CPU check just makes the hang less
	 * frequent as it makes the interrupt distributon model be more
	 * like LRU than MRU (the short-term load is more even across CPUs).
	 * See also the comment in end_level_ioapic_irq().  --macro
	 */

	/*
	 * - enable focus processor (bit==0)
	 * - 64bit mode always use processor focus
	 *   so no need to set it
	 */
	value &= ~APIC_SPIV_FOCUS_DISABLED;
#endif

	/*
	 * Set spurious IRQ vector
	 */
	value |= SPURIOUS_APIC_VECTOR;
	apic_write(APIC_SPIV, value);

	/*
	 * Set up LVT0, LVT1:
	 *
	 * set up through-local-APIC on the BP's LINT0. This is not
	 * strictly necessary in pure symmetric-IO mode, but sometimes
	 * we delegate interrupts to the 8259A.
	 */
	/*
	 * TODO: set up through-local-APIC from through-I/O-APIC? --macro
	 */
	value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
	if (!smp_processor_id() && (pic_mode || !value)) {
		value = APIC_DM_EXTINT;
		apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n",
				smp_processor_id());
	} else {
		value = APIC_DM_EXTINT | APIC_LVT_MASKED;
		apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n",
				smp_processor_id());
	}
	apic_write(APIC_LVT0, value);

	/*
	 * only the BP should see the LINT1 NMI signal, obviously.
	 */
	if (!smp_processor_id())
		value = APIC_DM_NMI;
	else
		value = APIC_DM_NMI | APIC_LVT_MASKED;
	if (!lapic_is_integrated())		/* 82489DX */
		value |= APIC_LVT_LEVEL_TRIGGER;
	apic_write(APIC_LVT1, value);

	preempt_enable();
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-01-30 12:30:40 +00:00
|
|
|
/*
 * Final per-CPU APIC setup steps: enable ESR reporting, mask the
 * lapic timer on 32-bit, and activate NMI watchdog and power
 * management hooks.
 */
void __cpuinit end_local_APIC_setup(void)
{
	lapic_setup_esr();

#ifdef CONFIG_X86_32
	{
		unsigned int value;
		/* Disable the local apic timer */
		value = apic_read(APIC_LVTT);
		value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
		apic_write(APIC_LVTT, value);
	}
#endif

	setup_apic_nmi_watchdog(NULL);
	apic_pm_activate();
}
|
|
|
|
|
2008-08-24 09:01:47 +00:00
|
|
|
#ifdef HAVE_X2APIC
|
2008-07-10 18:16:58 +00:00
|
|
|
void check_x2apic(void)
|
|
|
|
{
|
|
|
|
int msr, msr2;
|
|
|
|
|
|
|
|
rdmsr(MSR_IA32_APICBASE, msr, msr2);
|
|
|
|
|
|
|
|
if (msr & X2APIC_ENABLE) {
|
|
|
|
printk("x2apic enabled by BIOS, switching to x2apic ops\n");
|
|
|
|
x2apic_preenabled = x2apic = 1;
|
|
|
|
apic_ops = &x2apic_ops;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void enable_x2apic(void)
|
|
|
|
{
|
|
|
|
int msr, msr2;
|
|
|
|
|
|
|
|
rdmsr(MSR_IA32_APICBASE, msr, msr2);
|
|
|
|
if (!(msr & X2APIC_ENABLE)) {
|
|
|
|
printk("Enabling x2apic\n");
|
|
|
|
wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Enable interrupt remapping and (when possible) x2apic mode. With
 * CONFIG_INTR_REMAP the sequence is: validate the various disable /
 * pre-enabled combinations, init the DMAR tables, then - with irqs off
 * and the 8259A/IO-APIC state saved - enable interrupt remapping and
 * switch to x2apic ops, restoring the saved state on failure. Without
 * CONFIG_INTR_REMAP we can only complain, since x2apic requires IR.
 */
void enable_IR_x2apic(void)
{
#ifdef CONFIG_INTR_REMAP
	int ret;
	unsigned long flags;

	if (!cpu_has_x2apic)
		return;

	/* "nox2apic" on the command line and BIOS left x2apic off: obey */
	if (!x2apic_preenabled && disable_x2apic) {
		printk(KERN_INFO
		       "Skipped enabling x2apic and Interrupt-remapping "
		       "because of nox2apic\n");
		return;
	}

	/* x2apic mode cannot be undone once the BIOS enabled it */
	if (x2apic_preenabled && disable_x2apic)
		panic("Bios already enabled x2apic, can't enforce nox2apic");

	if (!x2apic_preenabled && skip_ioapic_setup) {
		printk(KERN_INFO
		       "Skipped enabling x2apic and Interrupt-remapping "
		       "because of skipping io-apic setup\n");
		return;
	}

	ret = dmar_table_init();
	if (ret) {
		printk(KERN_INFO
		       "dmar_table_init() failed with %d:\n", ret);

		if (x2apic_preenabled)
			panic("x2apic enabled by bios. But IR enabling failed");
		else
			printk(KERN_INFO
			       "Not enabling x2apic,Intr-remapping\n");
		return;
	}

	/* Quiesce interrupt sources while we rewrite the routing setup */
	local_irq_save(flags);
	mask_8259A();
	save_mask_IO_APIC_setup();

	ret = enable_intr_remapping(1);

	if (ret && x2apic_preenabled) {
		local_irq_restore(flags);
		panic("x2apic enabled by bios. But IR enabling failed");
	}

	if (ret)
		goto end;

	if (!x2apic) {
		x2apic = 1;
		apic_ops = &x2apic_ops;
		enable_x2apic();
	}
end:
	if (ret)
		/*
		 * IR enabling failed
		 */
		restore_IO_APIC_setup();
	else
		reinit_intr_remapped_IO_APIC(x2apic_preenabled);

	unmask_8259A();
	local_irq_restore(flags);

	if (!ret) {
		if (!x2apic_preenabled)
			printk(KERN_INFO
			       "Enabled x2apic and interrupt-remapping\n");
		else
			printk(KERN_INFO
			       "Enabled Interrupt-remapping\n");
	} else
		printk(KERN_ERR
		       "Failed to enable Interrupt-remapping and x2apic\n");
#else
	if (!cpu_has_x2apic)
		return;

	if (x2apic_preenabled)
		panic("x2apic enabled prior OS handover,"
		      " enable CONFIG_INTR_REMAP");

	printk(KERN_INFO "Enable CONFIG_INTR_REMAP for enabling intr-remapping "
	       " and x2apic\n");
#endif

	return;
}
|
2008-08-24 09:01:47 +00:00
|
|
|
#endif /* HAVE_X2APIC */
|
2008-07-10 18:16:58 +00:00
|
|
|
|
2008-08-24 09:01:51 +00:00
|
|
|
#ifdef CONFIG_X86_64
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* Detect and enable local APICs on non-SMP boards.
|
|
|
|
* Original code written by Keir Fraser.
|
|
|
|
* On AMD64 we trust the BIOS - if it says no APIC it is likely
|
2007-07-21 15:10:17 +00:00
|
|
|
* not correctly set up (usually the APIC timer won't work etc.)
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2008-01-30 12:30:20 +00:00
|
|
|
/*
 * 64-bit variant: trust CPUID; if an APIC is reported, assume the
 * default physical base and BSP apicid 0. Returns 0 on success,
 * -1 when no local APIC is present.
 */
static int __init detect_init_APIC(void)
{
	if (!cpu_has_apic) {
		printk(KERN_INFO "No local APIC present\n");
		return -1;
	}

	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
	boot_cpu_physical_apicid = 0;
	return 0;
}
|
2008-08-24 09:01:51 +00:00
|
|
|
#else
|
|
|
|
/*
|
|
|
|
* Detect and initialize APIC
|
|
|
|
*/
|
|
|
|
/*
 * Detect and initialize APIC
 *
 * 32-bit variant: check CPU vendor/family support, optionally
 * re-enable a BIOS-disabled APIC via the IA32_APIC_BASE MSR (only
 * when "lapic" was given), and record the APIC base address.
 * Returns 0 on success, -1 when no usable local APIC exists.
 */
static int __init detect_init_APIC(void)
{
	u32 h, l, features;

	/* Disabled by kernel option? */
	if (disable_apic)
		return -1;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
		    (boot_cpu_data.x86 == 15))
			break;
		goto no_apic;
	case X86_VENDOR_INTEL:
		if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 ||
		    (boot_cpu_data.x86 == 5 && cpu_has_apic))
			break;
		goto no_apic;
	default:
		goto no_apic;
	}

	if (!cpu_has_apic) {
		/*
		 * Over-ride BIOS and try to enable the local APIC only if
		 * "lapic" specified.
		 */
		if (!force_enable_local_apic) {
			printk(KERN_INFO "Local APIC disabled by BIOS -- "
			       "you can enable it with \"lapic\"\n");
			return -1;
		}
		/*
		 * Some BIOSes disable the local APIC in the APIC_BASE
		 * MSR. This can only be done in software for Intel P6 or later
		 * and AMD K7 (Model > 1) or later.
		 */
		rdmsr(MSR_IA32_APICBASE, l, h);
		if (!(l & MSR_IA32_APICBASE_ENABLE)) {
			printk(KERN_INFO
			       "Local APIC disabled by BIOS -- reenabling.\n");
			l &= ~MSR_IA32_APICBASE_BASE;
			l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
			wrmsr(MSR_IA32_APICBASE, l, h);
			/* remember so disable_local_APIC() can undo this */
			enabled_via_apicbase = 1;
		}
	}
	/*
	 * The APIC feature bit should now be enabled
	 * in `cpuid'
	 */
	features = cpuid_edx(1);
	if (!(features & (1 << X86_FEATURE_APIC))) {
		printk(KERN_WARNING "Could not enable APIC!\n");
		return -1;
	}
	set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

	/* The BIOS may have set up the APIC at some other address */
	rdmsr(MSR_IA32_APICBASE, l, h);
	if (l & MSR_IA32_APICBASE_ENABLE)
		mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;

	printk(KERN_INFO "Found and enabled local APIC!\n");

	apic_pm_activate();

	return 0;

no_apic:
	printk(KERN_INFO "No local APIC present or hardware disabled\n");
	return -1;
}
|
|
|
|
#endif
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-08-24 09:01:49 +00:00
|
|
|
#ifdef CONFIG_X86_64
|
2008-02-19 11:21:06 +00:00
|
|
|
/*
 * Map the local APIC registers early (before init_apic_mappings) when
 * an MP table / MADT supplied the LAPIC address, and read the BSP id.
 */
void __init early_init_lapic_mapping(void)
{
	unsigned long phys_addr;

	/*
	 * If no local APIC can be found then go out
	 * : it means there is no mpatable and MADT
	 */
	if (!smp_found_config)
		return;

	phys_addr = mp_lapic_addr;

	set_fixmap_nocache(FIX_APIC_BASE, phys_addr);
	apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
		    APIC_BASE, phys_addr);

	/*
	 * Fetch the APIC ID of the BSP in case we have a
	 * default configuration (or the MP table is broken).
	 */
	boot_cpu_physical_apicid = read_apic_id();
}
|
2008-08-24 09:01:49 +00:00
|
|
|
#endif
|
2008-02-19 11:21:06 +00:00
|
|
|
|
2008-01-30 12:30:20 +00:00
|
|
|
/**
|
|
|
|
* init_apic_mappings - initialize APIC mappings
|
|
|
|
*/
|
2005-04-16 22:20:36 +00:00
|
|
|
void __init init_apic_mappings(void)
{
#ifdef HAVE_X2APIC
	/* x2apic uses MSR accesses - no MMIO mapping needed */
	if (x2apic) {
		boot_cpu_physical_apicid = read_apic_id();
		return;
	}
#endif

	/*
	 * If no local APIC can be found then set up a fake all
	 * zeroes page to simulate the local APIC and another
	 * one for the IO-APIC.
	 */
	if (!smp_found_config && detect_init_APIC()) {
		apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
		apic_phys = __pa(apic_phys);
	} else
		apic_phys = mp_lapic_addr;

	set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
	apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
				APIC_BASE, apic_phys);

	/*
	 * Fetch the APIC ID of the BSP in case we have a
	 * default configuration (or the MP table is broken).
	 */
	if (boot_cpu_physical_apicid == -1U)
		boot_cpu_physical_apicid = read_apic_id();
}
|
|
|
|
|
|
|
|
/*
|
2008-01-30 12:30:20 +00:00
|
|
|
* This initializes the IO-APIC and APIC hardware if this is
|
|
|
|
* a UP kernel.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2008-08-18 16:45:57 +00:00
|
|
|
/* Local APIC version register value for each detected APIC, by APIC id */
int apic_version[MAX_APICS];
|
|
|
|
|
2008-01-30 12:30:20 +00:00
|
|
|
/*
 * Initialize the local APIC and (optionally) the IO-APIC on a UP
 * kernel / single-CPU boot. Returns 0 on success, -1 when the APIC
 * is absent or disabled.
 */
int __init APIC_init_uniprocessor(void)
{
#ifdef CONFIG_X86_64
	if (disable_apic) {
		printk(KERN_INFO "Apic disabled\n");
		return -1;
	}
	if (!cpu_has_apic) {
		disable_apic = 1;
		printk(KERN_INFO "Apic disabled by BIOS\n");
		return -1;
	}
#else
	if (!smp_found_config && !cpu_has_apic)
		return -1;

	/*
	 * Complain if the BIOS pretends there is one.
	 */
	if (!cpu_has_apic &&
	    APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
		       boot_cpu_physical_apicid);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
		return -1;
	}
#endif

#ifdef HAVE_X2APIC
	enable_IR_x2apic();
#endif
#ifdef CONFIG_X86_64
	setup_apic_routing();
#endif

	verify_local_APIC();
	connect_bsp_APIC();

#ifdef CONFIG_X86_64
	apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid));
#else
	/*
	 * Hack: In case of kdump, after a crash, kernel might be booting
	 * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid
	 * might be zero if read from MP tables. Get it from LAPIC.
	 */
# ifdef CONFIG_CRASH_DUMP
	boot_cpu_physical_apicid = read_apic_id();
# endif
#endif
	physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
	setup_local_APIC();

#ifdef CONFIG_X86_64
	/*
	 * Now enable IO-APICs, actually call clear_IO_APIC
	 * We need clear_IO_APIC before enabling vector on BP
	 */
	if (!skip_ioapic_setup && nr_ioapics)
		enable_IO_APIC();
#endif

	/* Keep the NMI watchdog on the LAPIC when no IO-APIC is used */
#ifdef CONFIG_X86_IO_APIC
	if (!smp_found_config || skip_ioapic_setup || !nr_ioapics)
#endif
		localise_nmi_watchdog();
	end_local_APIC_setup();

#ifdef CONFIG_X86_IO_APIC
	if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
		setup_IO_APIC();
# ifdef CONFIG_X86_64
	else
		nr_ioapics = 0;
# endif
#endif

#ifdef CONFIG_X86_64
	setup_boot_APIC_clock();
	check_nmi_watchdog();
#else
	setup_boot_clock();
#endif

	return 0;
}
|
|
|
|
|
|
|
|
/*
|
2008-01-30 12:30:20 +00:00
|
|
|
* Local APIC interrupts
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
|
|
|
|
2008-01-30 12:30:20 +00:00
|
|
|
/*
 * This interrupt should _never_ happen with our APIC/SMP architecture
 */
asmlinkage void smp_spurious_interrupt(void)
{
	unsigned int v;

	exit_idle();
	irq_enter();
	/*
	 * Check if this really is a spurious interrupt and ACK it
	 * if it is a vectored one.  Just in case...
	 * Spurious interrupts should not be ACKed.
	 *
	 * Each 32-bit APIC_ISR word covers 32 vectors and the words are
	 * spaced 16 bytes apart, so the byte offset of the word holding
	 * our vector's bit is (vector / 32) * 16 == (vector & ~0x1f) >> 1.
	 */
	v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
		ack_APIC_irq();

	/* Keep statistics so the spurious rate is visible for debugging. */
	add_pda(irq_spurious_count, 1);
	irq_exit();
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-01-30 12:30:20 +00:00
|
|
|
/*
 * This interrupt should never happen with our APIC/SMP architecture
 */
asmlinkage void smp_error_interrupt(void)
{
	unsigned int v, v1;

	exit_idle();
	irq_enter();
	/* First tickle the hardware, only then report what went on. -- REW */
	v = apic_read(APIC_ESR);
	/* Writing ESR latches the current error bits so they can be re-read. */
	apic_write(APIC_ESR, 0);
	v1 = apic_read(APIC_ESR);
	ack_APIC_irq();
	atomic_inc(&irq_err_count);

	/* Here is what the APIC error bits mean:
	   0: Send CS error
	   1: Receive CS error
	   2: Send accept error
	   3: Receive accept error
	   4: Reserved
	   5: Send illegal vector
	   6: Received illegal vector
	   7: Illegal register address
	*/
	printk(KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
		smp_processor_id(), v , v1);
	irq_exit();
}
|
|
|
|
|
2008-05-28 16:38:28 +00:00
|
|
|
/**
 * connect_bsp_APIC - attach the APIC to the interrupt system
 */
void __init connect_bsp_APIC(void)
{
#ifdef CONFIG_X86_32
	if (pic_mode) {
		/*
		 * Do not trust the local APIC being empty at bootup.
		 */
		clear_local_APIC();
		/*
		 * PIC mode, enable APIC mode in the IMCR, i.e. connect BSP's
		 * local APIC to INT and NMI lines.
		 */
		apic_printk(APIC_VERBOSE, "leaving PIC mode, "
				"enabling APIC mode.\n");
		/* IMCR is accessed via ports 0x22 (select) and 0x23 (data). */
		outb(0x70, 0x22);
		outb(0x01, 0x23);
	}
#endif
	enable_apic_mode();
}
|
|
|
|
|
2008-08-16 19:21:53 +00:00
|
|
|
/**
 * disconnect_bsp_APIC - detach the APIC from the interrupt system
 * @virt_wire_setup:	indicates, whether virtual wire mode is selected
 *
 * Virtual wire mode is necessary to deliver legacy interrupts even when the
 * APIC is disabled.
 */
void disconnect_bsp_APIC(int virt_wire_setup)
{
	unsigned int value;

#ifdef CONFIG_X86_32
	if (pic_mode) {
		/*
		 * Put the board back into PIC mode (has an effect only on
		 * certain older boards).  Note that APIC interrupts, including
		 * IPIs, won't work beyond this point!  The only exception are
		 * INIT IPIs.
		 */
		apic_printk(APIC_VERBOSE, "disabling APIC mode, "
				"entering PIC mode.\n");
		/* IMCR select/data ports; 0x00 routes INT/NMI back to the PIC. */
		outb(0x70, 0x22);
		outb(0x00, 0x23);
		return;
	}
#endif

	/* Go back to Virtual Wire compatibility mode */

	/* For the spurious interrupt use vector F, and enable it */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_VECTOR_MASK;
	value |= APIC_SPIV_APIC_ENABLED;
	value |= 0xf;
	apic_write(APIC_SPIV, value);

	if (!virt_wire_setup) {
		/*
		 * For LVT0 make it edge triggered, active high,
		 * external and enabled
		 */
		value = apic_read(APIC_LVT0);
		value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
			APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
			APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
		value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
		value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
		apic_write(APIC_LVT0, value);
	} else {
		/* Disable LVT0 */
		apic_write(APIC_LVT0, APIC_LVT_MASKED);
	}

	/*
	 * For LVT1 make it edge triggered, active high,
	 * nmi and enabled
	 */
	value = apic_read(APIC_LVT1);
	value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
			APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
			APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
	value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
	value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
	apic_write(APIC_LVT1, value);
}
|
|
|
|
|
2008-03-27 20:56:19 +00:00
|
|
|
/*
 * generic_processor_info - register one processor found in the MP/ACPI tables.
 * @apicid:  physical local APIC id of the processor
 * @version: APIC version reported for it (0 is fixed up to 0x10)
 *
 * Allocates a logical cpu number, records the apicid in the (early or
 * regular) per-cpu maps and marks the cpu possible/present.  The BSP is
 * forced to logical cpu 0 so logical numbering matches table order.
 */
void __cpuinit generic_processor_info(int apicid, int version)
{
	int cpu;
	cpumask_t tmp_map;

	/*
	 * Validate version
	 */
	if (version == 0x0) {
		/*
		 * Bug fix: the original passed 'version' (always 0 here) to
		 * the %d, so the warning always blamed CPU#0.  Report the
		 * actual apicid instead.
		 */
		printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! "
				"fixing up to 0x10. (tell your hw vendor)\n",
				apicid);
		version = 0x10;
	}
	apic_version[apicid] = version;

	if (num_processors >= NR_CPUS) {
		printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
			"  Processor ignored.\n", NR_CPUS);
		return;
	}

	num_processors++;
	/* Lowest cpu number not yet marked present becomes this cpu's id. */
	cpus_complement(tmp_map, cpu_present_map);
	cpu = first_cpu(tmp_map);

	physid_set(apicid, phys_cpu_present_map);
	if (apicid == boot_cpu_physical_apicid) {
		/*
		 * x86_bios_cpu_apicid is required to have processors listed
		 * in same order as logical cpu numbers. Hence the first
		 * entry is BSP, and so on.
		 */
		cpu = 0;
	}
	if (apicid > max_physical_apicid)
		max_physical_apicid = apicid;

#ifdef CONFIG_X86_32
	/*
	 * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
	 * but we need to work other dependencies like SMP_SUSPEND etc
	 * before this can be done without some confusion.
	 * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
	 *       - Ashok Raj <ashok.raj@intel.com>
	 */
	if (max_physical_apicid >= 8) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_INTEL:
			if (!APIC_XAPIC(version)) {
				def_to_bigsmp = 0;
				break;
			}
			/* If P4 and above fall through */
		case X86_VENDOR_AMD:
			def_to_bigsmp = 1;
		}
	}
#endif

#if defined(CONFIG_X86_SMP) || defined(CONFIG_X86_64)
	/* are we being called early in kernel startup? */
	if (early_per_cpu_ptr(x86_cpu_to_apicid)) {
		u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
		u16 *bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);

		cpu_to_apicid[cpu] = apicid;
		bios_cpu_apicid[cpu] = apicid;
	} else {
		/* per_cpu areas exist; use the real per-cpu variables. */
		per_cpu(x86_cpu_to_apicid, cpu) = apicid;
		per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
	}
#endif

	cpu_set(cpu, cpu_possible_map);
	cpu_set(cpu, cpu_present_map);
}
|
|
|
|
|
2008-08-24 09:01:48 +00:00
|
|
|
#ifdef CONFIG_X86_64
|
2008-07-10 18:16:48 +00:00
|
|
|
int hard_smp_processor_id(void)
|
|
|
|
{
|
|
|
|
return read_apic_id();
|
|
|
|
}
|
2008-08-24 09:01:48 +00:00
|
|
|
#endif
|
2008-07-10 18:16:48 +00:00
|
|
|
|
2007-10-14 20:57:45 +00:00
|
|
|
/*
|
2008-01-30 12:30:20 +00:00
|
|
|
* Power management
|
2007-10-14 20:57:45 +00:00
|
|
|
*/
|
2008-01-30 12:30:20 +00:00
|
|
|
#ifdef CONFIG_PM
|
|
|
|
|
|
|
|
/* Saved local APIC state for suspend/resume (see lapic_suspend/resume). */
static struct {
	/*
	 * 'active' is true if the local APIC was enabled by us and
	 * not the BIOS; this signifies that we are also responsible
	 * for disabling it before entering apm/acpi suspend
	 */
	int active;
	/* r/w apic fields */
	unsigned int apic_id;
	unsigned int apic_taskpri;
	unsigned int apic_ldr;
	unsigned int apic_dfr;
	unsigned int apic_spiv;
	unsigned int apic_lvtt;
	unsigned int apic_lvtpc;
	unsigned int apic_lvt0;
	unsigned int apic_lvt1;
	unsigned int apic_lvterr;
	unsigned int apic_tmict;
	unsigned int apic_tdcr;
	unsigned int apic_thmr;	/* thermal LVT; only saved when maxlvt >= 5 */
} apic_pm_state;
|
|
|
|
|
|
|
|
/*
 * Save all writable local APIC registers into apic_pm_state, then disable
 * the APIC with interrupts off.  No-op unless we enabled the APIC ourselves
 * (apic_pm_state.active).
 */
static int lapic_suspend(struct sys_device *dev, pm_message_t state)
{
	unsigned long flags;
	int maxlvt;

	if (!apic_pm_state.active)
		return 0;

	maxlvt = lapic_get_maxlvt();

	apic_pm_state.apic_id = apic_read(APIC_ID);
	apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
	apic_pm_state.apic_ldr = apic_read(APIC_LDR);
	apic_pm_state.apic_dfr = apic_read(APIC_DFR);
	apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
	apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
	/* The performance-counter LVT only exists when maxlvt >= 4. */
	if (maxlvt >= 4)
		apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
	apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
	apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
	apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
	apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
	apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
	/* Thermal LVT only exists when maxlvt >= 5. */
	if (maxlvt >= 5)
		apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
#endif

	local_irq_save(flags);
	disable_local_APIC();
	local_irq_restore(flags);
	return 0;
}
|
|
|
|
|
2008-01-30 12:30:20 +00:00
|
|
|
/*
 * Re-enable the local APIC after resume and restore every register saved
 * by lapic_suspend().  Counterpart to lapic_suspend; no-op unless
 * apic_pm_state.active.
 */
static int lapic_resume(struct sys_device *dev)
{
	unsigned int l, h;
	unsigned long flags;
	int maxlvt;

	if (!apic_pm_state.active)
		return 0;

	maxlvt = lapic_get_maxlvt();

	local_irq_save(flags);
#ifdef HAVE_X2APIC
	if (x2apic)
		enable_x2apic();
	else
#endif
	{
		/*
		 * Make sure the APICBASE points to the right address
		 *
		 * FIXME! This will be wrong if we ever support suspend on
		 * SMP! We'll need to do this as part of the CPU restore!
		 */
		rdmsr(MSR_IA32_APICBASE, l, h);
		l &= ~MSR_IA32_APICBASE_BASE;
		l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
		wrmsr(MSR_IA32_APICBASE, l, h);
	}

	/* Mask the error LVT while reprogramming, restore it at the end. */
	apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
	apic_write(APIC_ID, apic_pm_state.apic_id);
	apic_write(APIC_DFR, apic_pm_state.apic_dfr);
	apic_write(APIC_LDR, apic_pm_state.apic_ldr);
	apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
	apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
	apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
	apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
	if (maxlvt >= 5)
		apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
#endif
	if (maxlvt >= 4)
		apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
	apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
	apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
	apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
	/* Clear the ESR twice (write latches, read clears) before unmasking. */
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);
	apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);

	local_irq_restore(flags);

	return 0;
}
|
2007-10-12 21:04:07 +00:00
|
|
|
|
2008-08-16 19:21:53 +00:00
|
|
|
/*
 * This device has no shutdown method - fully functioning local APICs
 * are needed on every CPU up until machine_halt/restart/poweroff.
 */

/* sysdev class wiring the suspend/resume callbacks above into the PM core. */
static struct sysdev_class lapic_sysclass = {
	.name		= "lapic",
	.resume		= lapic_resume,
	.suspend	= lapic_suspend,
};

/* The single lapic sysdev instance, registered by init_lapic_sysfs(). */
static struct sys_device device_lapic = {
	.id	= 0,
	.cls	= &lapic_sysclass,
};
|
2007-10-12 21:04:07 +00:00
|
|
|
|
2008-01-30 12:30:20 +00:00
|
|
|
/*
 * Record that we (not the BIOS) enabled the local APIC, so the
 * suspend/resume handlers above know they must save and restore it.
 */
static void __cpuinit apic_pm_activate(void)
{
	apic_pm_state.active = 1;
}
|
|
|
|
|
2008-01-30 12:30:20 +00:00
|
|
|
/*
 * Register the lapic sysdev class and device so that lapic_suspend()
 * and lapic_resume() are invoked by the PM core.  Does nothing on
 * machines without a local APIC.
 */
static int __init init_lapic_sysfs(void)
{
	int err;

	if (!cpu_has_apic)
		return 0;
	/* XXX: remove suspend/resume procs if !apic_pm_state.active? */

	err = sysdev_class_register(&lapic_sysclass);
	if (err)
		return err;

	return sysdev_register(&device_lapic);
}
|
2008-01-30 12:30:20 +00:00
|
|
|
device_initcall(init_lapic_sysfs);
|
|
|
|
|
|
|
|
#else /* CONFIG_PM */
|
|
|
|
|
|
|
|
static void apic_pm_activate(void) { }
|
|
|
|
|
|
|
|
#endif /* CONFIG_PM */
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-08-24 09:01:49 +00:00
|
|
|
#ifdef CONFIG_X86_64
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
 * apic_is_clustered_box() -- Check if we can expect good TSC
 *
 * Thus far, the major user of this is IBM's Summit2 series:
 *
 * Clustered boxes may have unsynced TSC problems if they are
 * multi-chassis. Use available data to take a good guess.
 * If in doubt, go HPET.
 */
__cpuinit int apic_is_clustered_box(void)
{
	int i, clusters, zeros;
	unsigned id;
	u16 *bios_cpu_apicid;
	DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);

	/*
	 * there is not this kind of box with AMD CPU yet.
	 * Some AMD box with quadcore cpu and 8 sockets apicid
	 * will be [4, 0x23] or [8, 0x27] could be thought to
	 * vsmp box still need checking...
	 */
	if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box())
		return 0;

	bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
	bitmap_zero(clustermap, NUM_APIC_CLUSTERS);

	/* Mark, for every known cpu, the APIC cluster its apicid lies in. */
	for (i = 0; i < NR_CPUS; i++) {
		/* are we being called early in kernel startup? */
		if (bios_cpu_apicid) {
			id = bios_cpu_apicid[i];
		}
		else if (i < nr_cpu_ids) {
			if (cpu_present(i))
				id = per_cpu(x86_bios_cpu_apicid, i);
			else
				continue;
		}
		else
			break;

		if (id != BAD_APICID)
			__set_bit(APIC_CLUSTERID(id), clustermap);
	}

	/* Problem:  Partially populated chassis may not have CPUs in some of
	 * the APIC clusters they have been allocated.  Only present CPUs have
	 * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap.
	 * Since clusters are allocated sequentially, count zeros only if
	 * they are bounded by ones.
	 */
	clusters = 0;
	zeros = 0;
	for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
		if (test_bit(i, clustermap)) {
			/* Interior gaps count towards the cluster total. */
			clusters += 1 + zeros;
			zeros = 0;
		} else
			++zeros;
	}

	/* ScaleMP vSMPowered boxes have one cluster per board and TSCs are
	 * not guaranteed to be synced between boards
	 */
	if (is_vsmp_box() && clusters > 1)
		return 1;

	/*
	 * If clusters > 2, then should be multi-chassis.
	 * May have to revisit this when multi-core + hyperthreaded CPUs come
	 * out, but AFAIK this will work even for them.
	 */
	return (clusters > 2);
}
|
2008-08-24 09:01:49 +00:00
|
|
|
#endif
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/*
|
2008-01-30 12:30:20 +00:00
|
|
|
* APIC command line parameters
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2008-08-18 16:46:01 +00:00
|
|
|
/*
 * "disableapic" boot parameter: turn off the local APIC entirely and
 * clear the APIC CPU capability bit so nothing else tries to use it.
 */
static int __init setup_disableapic(char *arg)
{
	disable_apic = 1;
	setup_clear_cpu_cap(X86_FEATURE_APIC);
	return 0;
}
early_param("disableapic", setup_disableapic);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-09-26 08:52:32 +00:00
|
|
|
/* same as disableapic, for compatibility */
static int __init setup_nolapic(char *arg)
{
	return setup_disableapic(arg);
}
early_param("nolapic", setup_nolapic);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2007-03-23 18:32:31 +00:00
|
|
|
/*
 * "lapic_timer_c2_ok" boot parameter: trust the local APIC timer to keep
 * running in the C2 power state.
 */
static int __init parse_lapic_timer_c2_ok(char *arg)
{
	local_apic_timer_c2_ok = 1;
	return 0;
}
early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
|
|
|
|
|
2008-08-15 11:51:20 +00:00
|
|
|
/* "noapictimer" boot parameter: do not use the local APIC timer. */
static int __init parse_disable_apic_timer(char *arg)
{
	disable_apic_timer = 1;
	return 0;
}
early_param("noapictimer", parse_disable_apic_timer);
|
|
|
|
|
|
|
|
static int __init parse_nolapic_timer(char *arg)
|
|
|
|
{
|
|
|
|
disable_apic_timer = 1;
|
|
|
|
return 0;
|
2007-07-21 15:10:17 +00:00
|
|
|
}
|
2008-08-15 11:51:20 +00:00
|
|
|
early_param("nolapic_timer", parse_nolapic_timer);
|
2006-02-03 20:50:50 +00:00
|
|
|
|
2008-08-18 16:46:00 +00:00
|
|
|
/*
 * "apic" boot parameter: set APIC debug verbosity ("verbose" or "debug").
 * On 64-bit, a bare "apic" with no argument re-enables IO-APIC setup.
 */
static int __init apic_set_verbosity(char *arg)
{
	if (!arg)  {
#ifdef CONFIG_X86_64
		skip_ioapic_setup = 0;
		return 0;
#endif
		return -EINVAL;
	}

	if (strcmp("debug", arg) == 0)
		apic_verbosity = APIC_DEBUG;
	else if (strcmp("verbose", arg) == 0)
		apic_verbosity = APIC_VERBOSE;
	else {
		printk(KERN_WARNING "APIC Verbosity level %s not recognised"
			" use apic=verbose or apic=debug\n", arg);
		return -EINVAL;
	}

	return 0;
}
early_param("apic", apic_set_verbosity);
|
|
|
|
|
2008-02-22 21:37:26 +00:00
|
|
|
/*
 * Register the local APIC MMIO page in the iomem resource tree so it
 * shows up in /proc/iomem.  Skipped when no APIC was mapped (!apic_phys).
 */
static int __init lapic_insert_resource(void)
{
	if (!apic_phys)
		return -1;

	/* Put local APIC into the resource map. */
	lapic_resource.start = apic_phys;
	lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
	insert_resource(&iomem_resource, &lapic_resource);

	return 0;
}

/*
 * need call insert after e820_reserve_resources()
 * that is using request_resource
 */
late_initcall(lapic_insert_resource);
|