mirror of https://github.com/torvalds/linux.git
synced 2024-12-05 18:41:23 +00:00
bb7a78e343
Under CONFIG_DEBUG_ATOMIC_SLEEP=y and CONFIG_DEBUG_PREEMPT=y, we can see the following messages on LoongArch; this is because might_sleep() is called in a context where preemption is disabled.

[ 0.001127] smp: Bringing up secondary CPUs ...
[ 0.001222] Booting CPU#1...
[ 0.001244] 64-bit Loongson Processor probed (LA464 Core)
[ 0.001247] CPU1 revision is: 0014c012 (Loongson-64bit)
[ 0.001250] FPU1 revision is: 00000000
[ 0.001252] BUG: sleeping function called from invalid context at kernel/locking/mutex.c:283
[ 0.001255] in_atomic(): 1, irqs_disabled(): 1, non_block: 0, pid: 0, name: swapper/1
[ 0.001257] preempt_count: 1, expected: 0
[ 0.001258] RCU nest depth: 0, expected: 0
[ 0.001259] Preemption disabled at:
[ 0.001261] [<9000000000223800>] arch_dup_task_struct+0x20/0x110
[ 0.001272] CPU: 1 PID: 0 Comm: swapper/1 Not tainted 6.2.0-rc7+ #43
[ 0.001275] Hardware name: Loongson Loongson-3A5000-7A1000-1w-A2101/Loongson-LS3A5000-7A1000-1w-A2101, BIOS vUDK2018-LoongArch-V4.0.05132-beta10 12/13/202
[ 0.001277] Stack : 0072617764726148 0000000000000000 9000000000222f1c 90000001001e0000
[ 0.001286]         90000001001e3be0 90000001001e3be8 0000000000000000 0000000000000000
[ 0.001292]         90000001001e3be8 0000000000000040 90000001001e3cb8 90000001001e3a50
[ 0.001297]         9000000001642000 90000001001e3be8 be694d10ce4139dd 9000000100174500
[ 0.001303]         0000000000000001 0000000000000001 00000000ffffe0a2 0000000000000020
[ 0.001309]         000000000000002f 9000000001354116 00000000056b0000 ffffffffffffffff
[ 0.001314]         0000000000000000 0000000000000000 90000000014f6e90 9000000001642000
[ 0.001320]         900000000022b69c 0000000000000001 0000000000000000 9000000001736a90
[ 0.001325]         9000000100038000 0000000000000000 9000000000222f34 0000000000000000
[ 0.001331]         00000000000000b0 0000000000000004 0000000000000000 0000000000070000
[ 0.001337]         ...
[ 0.001339] Call Trace:
[ 0.001342] [<9000000000222f34>] show_stack+0x5c/0x180
[ 0.001346] [<90000000010bdd80>] dump_stack_lvl+0x60/0x88
[ 0.001352] [<9000000000266418>] __might_resched+0x180/0x1cc
[ 0.001356] [<90000000010c742c>] mutex_lock+0x20/0x64
[ 0.001359] [<90000000002a8ccc>] irq_find_matching_fwspec+0x48/0x124
[ 0.001364] [<90000000002259c4>] constant_clockevent_init+0x68/0x204
[ 0.001368] [<900000000022acf4>] start_secondary+0x40/0xa8
[ 0.001371] [<90000000010c0124>] smpboot_entry+0x60/0x64

Here is the complete call chain:

smpboot_entry()
  start_secondary()
    constant_clockevent_init()
      get_timer_irq()
        irq_find_matching_fwnode()
          irq_find_matching_fwspec()
            mutex_lock()
              might_sleep()
                __might_sleep()
                  __might_resched()

In order to avoid the above issue, we should break the call chain. Using the timer_irq_installed variable as a check condition, so that get_timer_irq() is called only once in constant_clockevent_init(), is a simple and proper way to do that.

Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
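The guard itself can be seen in constant_clockevent_init() in the file below. Condensed to just the relevant lines (device setup and registration elided, explanatory comments added here), the shape of the fix is roughly:

int constant_clockevent_init(void)
{
	static int irq = 0, timer_irq_installed = 0;

	/*
	 * Resolve the timer irq only on the first call (from time_init() on
	 * the boot CPU); later calls from secondary CPUs reuse the cached
	 * value and never reach irq_find_matching_fwspec()/mutex_lock()
	 * with preemption disabled.
	 */
	if (!timer_irq_installed) {
		irq = get_timer_irq();
		if (irq < 0)
			pr_err("Failed to map irq %d (timer)\n", irq);
	}

	/* ... per-CPU clock_event_device setup and registration ... */

	if (timer_irq_installed)
		return 0;

	timer_irq_installed = 1;

	/* Counter sync and request_irq() likewise happen only once. */
	sync_counter();
	if (request_irq(irq, constant_timer_interrupt, IRQF_PERCPU | IRQF_TIMER, "timer", NULL))
		pr_err("Failed to request irq %d (timer)\n", irq);

	return 0;
}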
233 lines
5.1 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * Common time service routines for LoongArch machines.
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/clockchips.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/sched_clock.h>
#include <linux/spinlock.h>

#include <asm/cpu-features.h>
#include <asm/loongarch.h>
#include <asm/time.h>

u64 cpu_clock_freq;
EXPORT_SYMBOL(cpu_clock_freq);
u64 const_clock_freq;
EXPORT_SYMBOL(const_clock_freq);

static DEFINE_RAW_SPINLOCK(state_lock);
static DEFINE_PER_CPU(struct clock_event_device, constant_clockevent_device);

static void constant_event_handler(struct clock_event_device *dev)
{
}

irqreturn_t constant_timer_interrupt(int irq, void *data)
{
	int cpu = smp_processor_id();
	struct clock_event_device *cd;

	/* Clear Timer Interrupt */
	write_csr_tintclear(CSR_TINTCLR_TI);
	cd = &per_cpu(constant_clockevent_device, cpu);
	cd->event_handler(cd);

	return IRQ_HANDLED;
}

static int constant_set_state_oneshot(struct clock_event_device *evt)
{
	unsigned long timer_config;

	raw_spin_lock(&state_lock);

	timer_config = csr_read64(LOONGARCH_CSR_TCFG);
	timer_config |= CSR_TCFG_EN;
	timer_config &= ~CSR_TCFG_PERIOD;
	csr_write64(timer_config, LOONGARCH_CSR_TCFG);

	raw_spin_unlock(&state_lock);

	return 0;
}

static int constant_set_state_oneshot_stopped(struct clock_event_device *evt)
{
	unsigned long timer_config;

	raw_spin_lock(&state_lock);

	timer_config = csr_read64(LOONGARCH_CSR_TCFG);
	timer_config &= ~CSR_TCFG_EN;
	csr_write64(timer_config, LOONGARCH_CSR_TCFG);

	raw_spin_unlock(&state_lock);

	return 0;
}

static int constant_set_state_periodic(struct clock_event_device *evt)
{
	unsigned long period;
	unsigned long timer_config;

	raw_spin_lock(&state_lock);

	period = const_clock_freq / HZ;
	timer_config = period & CSR_TCFG_VAL;
	timer_config |= (CSR_TCFG_PERIOD | CSR_TCFG_EN);
	csr_write64(timer_config, LOONGARCH_CSR_TCFG);

	raw_spin_unlock(&state_lock);

	return 0;
}

static int constant_set_state_shutdown(struct clock_event_device *evt)
{
	return 0;
}

static int constant_timer_next_event(unsigned long delta, struct clock_event_device *evt)
{
	unsigned long timer_config;

	delta &= CSR_TCFG_VAL;
	timer_config = delta | CSR_TCFG_EN;
	csr_write64(timer_config, LOONGARCH_CSR_TCFG);

	return 0;
}

static unsigned long __init get_loops_per_jiffy(void)
{
	unsigned long lpj = (unsigned long)const_clock_freq;

	do_div(lpj, HZ);

	return lpj;
}

static long init_offset __nosavedata;

void save_counter(void)
{
	init_offset = drdtime();
}

void sync_counter(void)
{
	/* Ensure counter begin at 0 */
	csr_write64(init_offset, LOONGARCH_CSR_CNTC);
}

static int get_timer_irq(void)
{
	struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY);

	if (d)
		return irq_create_mapping(d, EXCCODE_TIMER - EXCCODE_INT_START);

	return -EINVAL;
}

int constant_clockevent_init(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned long min_delta = 0x600;
	unsigned long max_delta = (1UL << 48) - 1;
	struct clock_event_device *cd;
	static int irq = 0, timer_irq_installed = 0;

	if (!timer_irq_installed) {
		irq = get_timer_irq();
		if (irq < 0)
			pr_err("Failed to map irq %d (timer)\n", irq);
	}

	cd = &per_cpu(constant_clockevent_device, cpu);

	cd->name = "Constant";
	cd->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_PERCPU;

	cd->irq = irq;
	cd->rating = 320;
	cd->cpumask = cpumask_of(cpu);
	cd->set_state_oneshot = constant_set_state_oneshot;
	cd->set_state_oneshot_stopped = constant_set_state_oneshot_stopped;
	cd->set_state_periodic = constant_set_state_periodic;
	cd->set_state_shutdown = constant_set_state_shutdown;
	cd->set_next_event = constant_timer_next_event;
	cd->event_handler = constant_event_handler;

	clockevents_config_and_register(cd, const_clock_freq, min_delta, max_delta);

	if (timer_irq_installed)
		return 0;

	timer_irq_installed = 1;

	sync_counter();

	if (request_irq(irq, constant_timer_interrupt, IRQF_PERCPU | IRQF_TIMER, "timer", NULL))
		pr_err("Failed to request irq %d (timer)\n", irq);

	lpj_fine = get_loops_per_jiffy();
	pr_info("Constant clock event device register\n");

	return 0;
}

static u64 read_const_counter(struct clocksource *clk)
{
	return drdtime();
}

static u64 native_sched_clock(void)
{
	return read_const_counter(NULL);
}

static struct clocksource clocksource_const = {
	.name = "Constant",
	.rating = 400,
	.read = read_const_counter,
	.mask = CLOCKSOURCE_MASK(64),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
	.vdso_clock_mode = VDSO_CLOCKMODE_CPU,
};

int __init constant_clocksource_init(void)
{
	int res;
	unsigned long freq = const_clock_freq;

	res = clocksource_register_hz(&clocksource_const, freq);

	sched_clock_register(native_sched_clock, 64, freq);

	pr_info("Constant clock source device register\n");

	return res;
}

void __init time_init(void)
{
	if (!cpu_has_cpucfg)
		const_clock_freq = cpu_clock_freq;
	else
		const_clock_freq = calc_const_freq();

	init_offset = -(drdtime() - csr_read64(LOONGARCH_CSR_CNTC));

	constant_clockevent_init();
	constant_clocksource_init();
}