// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 1991,1992,1995 Linus Torvalds
 * Copyright (c) 1994 Alan Modra
 * Copyright (c) 1995 Markus Kuhn
 * Copyright (c) 1996 Ingo Molnar
 * Copyright (c) 1998 Andrea Arcangeli
 * Copyright (c) 2002,2006 Vojtech Pavlik
 * Copyright (c) 2003 Andi Kleen
 *
 */

#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/i8253.h>
#include <linux/time.h>
#include <linux/export.h>

#include <asm/vsyscall.h>
#include <asm/x86_init.h>
#include <asm/i8259.h>
#include <asm/timer.h>
#include <asm/hpet.h>
#include <asm/time.h>

#ifdef CONFIG_X86_64
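/*
 * 64-bit definition of the jiffies tick counter, kept in its own cache
 * line (__cacheline_aligned_in_smp) so the frequent tick updates do not
 * share a line with other hot data.
 */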
__visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES;
#endif

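/*
 * profile_pc - pick the instruction pointer a profiling tick is charged to.
 *
 * If the tick landed in the kernel's lock/unlock primitives, return the
 * caller's address instead, so lock contention is attributed to the call
 * site rather than to the locking code itself.
 */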
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (!user_mode(regs) && in_lock_functions(pc)) {
#ifdef CONFIG_FRAME_POINTER
		return *(unsigned long *)(regs->bp + sizeof(long));
#else
		unsigned long *sp = (unsigned long *)regs->sp;
		/*
		 * Return address is either directly at stack pointer
		 * or above a saved flags. Eflags has bits 22-31 zero,
		 * kernel addresses don't.
		 */
		if (sp[0] >> 22)
			return sp[0];
		if (sp[1] >> 22)
			return sp[1];
#endif
	}
	return pc;
}
EXPORT_SYMBOL(profile_pc);

/*
 * Default timer interrupt handler for PIT/HPET
 */
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	global_clock_event->event_handler(global_clock_event);
	return IRQ_HANDLED;
}

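/*
 * irqaction for the legacy timer interrupt: flagged as a timer interrupt
 * (IRQF_TIMER), excluded from IRQ balancing (IRQF_NOBALANCING) and usable
 * by the irq poll mechanism (IRQF_IRQPOLL).
 */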
static struct irqaction irq0 = {
	.handler = timer_interrupt,
	.flags = IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER,
	.name = "timer"
};

static void __init setup_default_timer_irq(void)
{
	/*
	 * Unconditionally register the legacy timer; even without legacy
	 * PIC/PIT we need this for the HPET0 in legacy replacement mode.
	 */
	if (setup_irq(0, &irq0))
		pr_info("Failed to register legacy timer interrupt\n");
}

/* Default timer init function */
void __init hpet_time_init(void)
{
	if (!hpet_enable()) {
		if (!pit_timer_init())
			return;
	}

	setup_default_timer_irq();
}

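/*
 * Runs as late_time_init: set up the platform timer (normally
 * hpet_time_init()), select the final interrupt delivery mode, then
 * initialize the TSC.
 */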
static __init void x86_late_time_init(void)
{
	x86_init.timers.timer_init();
	/*
	 * After PIT/HPET timers init, select and setup
	 * the final interrupt mode for delivering IRQs.
	 */
	x86_init.irqs.intr_mode_init();
	tsc_init();
}

/*
 * Initialize TSC and delay the periodic timer init to
 * late x86_late_time_init() so ioremap works.
 */
void __init time_init(void)
{
	late_time_init = x86_late_time_init;
}

/*
 * Sanity check the vdso related archdata content.
 */
void clocksource_arch_init(struct clocksource *cs)
{
	if (cs->archdata.vclock_mode == VCLOCK_NONE)
		return;

	if (cs->archdata.vclock_mode > VCLOCK_MAX) {
		pr_warn("clocksource %s registered with invalid vclock_mode %d. Disabling vclock.\n",
			cs->name, cs->archdata.vclock_mode);
		cs->archdata.vclock_mode = VCLOCK_NONE;
	}

	if (cs->mask != CLOCKSOURCE_MASK(64)) {
		pr_warn("clocksource %s registered with invalid mask %016llx. Disabling vclock.\n",
			cs->name, cs->mask);
		cs->archdata.vclock_mode = VCLOCK_NONE;
	}
}