Merge branch 'x86-timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 timers updates from Thomas Gleixner:
 "This update contains:

   - The solution for the TSC deadline timer borkage, which is caused
     by a hardware problem in the TSC_ADJUST/TSC_DEADLINE_TIMER logic.

     The problem is documented now and fixed with a microcode update,
     so we can remove the workaround and just check for the microcode
     version. If the microcode is not up to date, then the TSC deadline
     timer is disabled. If the borkage is fixed by the proper microcode
     version, then the deadline timer can be used. In both cases the
     restrictions to the range of the TSC_ADJUST value, which were
     added as workarounds, are removed.

   - A few simple fixes and updates to the timer related x86 code"

* 'x86-timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/tsc: Call check_system_tsc_reliable() before unsynchronized_tsc()
  x86/hpet: Do not use smp_processor_id() in preemptible code
  x86/time: Make setup_default_timer_irq() static
  x86/tsc: Remove the TSC_ADJUST clamp
  x86/apic: Add TSC_DEADLINE quirk due to errata
  x86/apic: Change the lapic name in deadline mode
commit 3ad918e65d
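
The fix keys off the CPU model/stepping and the running microcode revision (see the deadline_match table added below). As a quick way to compare a live system against those minimum revisions, here is a small userspace sketch, not part of this series; the /proc/cpuinfo field names it matches ("cpu family", "model", "stepping", "microcode") are the usual x86 ones.

	/*
	 * Illustrative sketch (not part of this patch series): print the
	 * /proc/cpuinfo fields that the quirk below keys on, so the running
	 * microcode revision can be compared against the deadline_match
	 * minimums. Only the first processor block is printed.
	 */
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		FILE *f = fopen("/proc/cpuinfo", "r");
		char line[256];

		if (!f) {
			perror("/proc/cpuinfo");
			return 1;
		}

		while (fgets(line, sizeof(line), f)) {
			if (line[0] == '\n')
				break;			/* end of the first CPU block */
			if (!strncmp(line, "cpu family", 10) ||
			    !strncmp(line, "model", 5) ||	/* also matches "model name" */
			    !strncmp(line, "stepping", 8) ||
			    !strncmp(line, "microcode", 9))
				fputs(line, stdout);
		}

		fclose(f);
		return 0;
	}
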
@@ -44,7 +44,6 @@ extern unsigned long saved_video_mode;
 
 extern void reserve_standard_io_resources(void);
 extern void i386_reserve_resources(void);
-extern void setup_default_timer_irq(void);
 
 #ifdef CONFIG_X86_INTEL_MID
 extern void x86_intel_mid_early_setup(void);
@@ -54,6 +54,8 @@
 #include <asm/mce.h>
 #include <asm/tsc.h>
 #include <asm/hypervisor.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 
 unsigned int num_processors;
 
@@ -545,6 +547,81 @@ static struct clock_event_device lapic_clockevent = {
 };
 static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
 
+#define DEADLINE_MODEL_MATCH_FUNC(model, func)	\
+	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&func }
+
+#define DEADLINE_MODEL_MATCH_REV(model, rev)	\
+	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)rev }
+
+static u32 hsx_deadline_rev(void)
+{
+	switch (boot_cpu_data.x86_mask) {
+	case 0x02: return 0x3a; /* EP */
+	case 0x04: return 0x0f; /* EX */
+	}
+
+	return ~0U;
+}
+
+static u32 bdx_deadline_rev(void)
+{
+	switch (boot_cpu_data.x86_mask) {
+	case 0x02: return 0x00000011;
+	case 0x03: return 0x0700000e;
+	case 0x04: return 0x0f00000c;
+	case 0x05: return 0x0e000003;
+	}
+
+	return ~0U;
+}
+
+static const struct x86_cpu_id deadline_match[] = {
+	DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_HASWELL_X,	hsx_deadline_rev),
+	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_X,	0x0b000020),
+	DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_XEON_D, bdx_deadline_rev),
+	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_X,	0x02000014),
+
+	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_CORE,	0x22),
+	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_ULT,	0x20),
+	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_GT3E,	0x17),
+
+	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_CORE,	0x25),
+	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_GT3E,	0x17),
+
+	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_MOBILE,	0xb2),
+	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_DESKTOP,	0xb2),
+
+	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_KABYLAKE_MOBILE,	0x52),
+	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_KABYLAKE_DESKTOP,	0x52),
+
+	{},
+};
+
+static void apic_check_deadline_errata(void)
+{
+	const struct x86_cpu_id *m = x86_match_cpu(deadline_match);
+	u32 rev;
+
+	if (!m)
+		return;
+
+	/*
+	 * Function pointers will have the MSB set due to address layout,
+	 * immediate revisions will not.
+	 */
+	if ((long)m->driver_data < 0)
+		rev = ((u32 (*)(void))(m->driver_data))();
+	else
+		rev = (u32)m->driver_data;
+
+	if (boot_cpu_data.microcode >= rev)
+		return;
+
+	setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
+	pr_err(FW_BUG "TSC_DEADLINE disabled due to Errata; "
+	       "please update microcode to version: 0x%x (or later)\n", rev);
+}
+
 /*
  * Setup the local APIC timer for this CPU. Copy the initialized values
  * of the boot CPU and register the clock event in the framework.
@@ -563,6 +640,7 @@ static void setup_APIC_timer(void)
 	levt->cpumask = cpumask_of(smp_processor_id());
 
 	if (this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) {
+		levt->name = "lapic-deadline";
 		levt->features &= ~(CLOCK_EVT_FEAT_PERIODIC |
 				    CLOCK_EVT_FEAT_DUMMY);
 		levt->set_next_event = lapic_next_deadline;
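
With the hunk above, the per-CPU clock event device registers as "lapic-deadline" when the deadline timer is in use, so the mode becomes visible at runtime. A minimal sketch, not part of the series, that reads the name for CPU0; it assumes the clockevents sysfs interface is available (the same name also appears per CPU in /proc/timer_list).

	/*
	 * Illustrative sketch (not part of this patch series): report which
	 * clock event device CPU0 ended up with, e.g. "lapic-deadline" vs.
	 * "lapic". Assumes the clockevents sysfs interface is present.
	 */
	#include <stdio.h>

	int main(void)
	{
		const char *path =
			"/sys/devices/system/clockevents/clockevent0/current_device";
		char name[64];
		FILE *f = fopen(path, "r");

		if (!f || !fgets(name, sizeof(name), f)) {
			perror(path);
			return 1;
		}

		printf("CPU0 clock event device: %s", name);
		fclose(f);
		return 0;
	}
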
@@ -1779,6 +1857,8 @@ void __init init_apic_mappings(void)
 {
 	unsigned int new_apicid;
 
+	apic_check_deadline_errata();
+
 	if (x2apic_mode) {
 		boot_cpu_physical_apicid = read_apic_id();
 		return;
@@ -285,7 +285,7 @@ static void hpet_legacy_clockevent_register(void)
 	 * Start hpet with the boot cpu mask and make it
 	 * global after the IO_APIC has been initialized.
 	 */
-	hpet_clockevent.cpumask = cpumask_of(smp_processor_id());
+	hpet_clockevent.cpumask = cpumask_of(boot_cpu_data.cpu_index);
 	clockevents_config_and_register(&hpet_clockevent, hpet_freq,
 					HPET_MIN_PROG_DELTA, 0x7FFFFFFF);
 	global_clock_event = &hpet_clockevent;
@@ -66,7 +66,7 @@ static struct irqaction irq0 = {
 	.name		= "timer"
 };
 
-void __init setup_default_timer_irq(void)
+static void __init setup_default_timer_irq(void)
 {
 	if (!nr_legacy_irqs())
 		return;
@@ -1328,11 +1328,11 @@ void __init tsc_init(void)
 
 	use_tsc_delay();
 
+	check_system_tsc_reliable();
+
 	if (unsynchronized_tsc())
 		mark_tsc_unstable("TSCs unsynchronized");
 
-	check_system_tsc_reliable();
-
 	detect_art();
 }
 
@@ -71,13 +71,8 @@ static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
 	 * non zero. We don't do that on non boot cpus because physical
 	 * hotplug should have set the ADJUST register to a value > 0 so
 	 * the TSC is in sync with the already running cpus.
-	 *
-	 * But we always force positive ADJUST values. Otherwise the TSC
-	 * deadline timer creates an interrupt storm. We also have to
-	 * prevent values > 0x7FFFFFFF as those wreckage the timer as well.
 	 */
-	if ((bootcpu && bootval != 0) || (!bootcpu && bootval < 0) ||
-	    (bootval > 0x7FFFFFFF)) {
+	if (bootcpu && bootval != 0) {
 		pr_warn(FW_BUG "TSC ADJUST: CPU%u: %lld force to 0\n", cpu,
 			bootval);
 		wrmsrl(MSR_IA32_TSC_ADJUST, 0);
@@ -451,20 +446,6 @@ retry:
 	 */
 	cur->adjusted += cur_max_warp;
 
-	/*
-	 * TSC deadline timer stops working or creates an interrupt storm
-	 * with adjust values < 0 and > x07ffffff.
-	 *
-	 * To allow adjust values > 0x7FFFFFFF we need to disable the
-	 * deadline timer and use the local APIC timer, but that requires
-	 * more intrusive changes and we do not have any useful information
-	 * from Intel about the underlying HW wreckage yet.
-	 */
-	if (cur->adjusted < 0)
-		cur->adjusted = 0;
-	if (cur->adjusted > 0x7FFFFFFF)
-		cur->adjusted = 0x7FFFFFFF;
-
 	pr_warn("TSC ADJUST compensate: CPU%u observed %lld warp. Adjust: %lld\n",
 		cpu, cur_max_warp, cur->adjusted);
 
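
The two tsc_sync.c hunks above drop the clamping of the TSC_ADJUST value to the 0...0x7FFFFFFF range, now that the deadline timer erratum is handled by the microcode check instead. To inspect the value a given CPU actually ends up with, here is a small sketch, not part of the series, that reads IA32_TSC_ADJUST (MSR 0x3b) through the msr driver; it assumes root and a loaded msr module (modprobe msr).

	/*
	 * Illustrative sketch (not part of this patch series): dump
	 * IA32_TSC_ADJUST (MSR 0x3b) for one CPU via /dev/cpu/<n>/msr,
	 * where the msr driver maps the file offset to the MSR number.
	 * Needs root and the msr module.
	 */
	#include <fcntl.h>
	#include <inttypes.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	#define MSR_IA32_TSC_ADJUST	0x0000003b

	int main(int argc, char **argv)
	{
		int cpu = argc > 1 ? atoi(argv[1]) : 0;
		char path[64];
		uint64_t val;
		int fd;

		snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
		fd = open(path, O_RDONLY);
		if (fd < 0) {
			perror(path);
			return 1;
		}

		if (pread(fd, &val, sizeof(val), MSR_IA32_TSC_ADJUST) != sizeof(val)) {
			perror("pread");
			close(fd);
			return 1;
		}

		/* TSC_ADJUST is a signed quantity, so print it both ways. */
		printf("CPU%d IA32_TSC_ADJUST = %" PRId64 " (0x%" PRIx64 ")\n",
		       cpu, (int64_t)val, val);
		close(fd);
		return 0;
	}
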