Merge branch 'nohz/printk-v8' into irq/core
Conflicts:
	kernel/irq_work.c

Add support for printk on full dynticks CPUs:

* Don't stop the tick with irq works pending. This fix is generally
  useful and concerns archs that can't raise self-IPIs (the tick-side
  check is sketched after the diff below).
* Flush irq works before CPU offlining.
* Introduce "lazy" irq works that can wait for the next tick to be
  executed, unless the tick is stopped (see the sketch after this
  message).
* Implement the klogd wake-up using irq work. This removes the ad-hoc
  printk_tick()/printk_needs_cpu() hooks and makes it work even in
  dynticks mode.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
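To make the "lazy" flavor concrete, here is a minimal sketch of declaring and queueing a lazy irq work, modeled on the klogd wake-up this series adds to kernel/printk.c. It is reconstructed from memory of that patch, so treat the identifiers (log_wait, wake_up_klogd_work_func) as illustrative rather than a verbatim quote, and it assumes IRQ_WORK_LAZY is exported through <linux/irq_work.h>, as the series does:

	#include <linux/irq_work.h>
	#include <linux/percpu.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(log_wait);

	/* Runs from the irq work interrupt, or from the next tick when lazy */
	static void wake_up_klogd_work_func(struct irq_work *irq_work)
	{
		wake_up_interruptible(&log_wait);
	}

	/*
	 * IRQ_WORK_LAZY: while the tick is running, queueing raises no
	 * self-IPI; the work simply rides the next tick. The IPI is only
	 * raised when tick_nohz_tick_stopped() says no tick will come.
	 */
	static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
		.func	= wake_up_klogd_work_func,
		.flags	= IRQ_WORK_LAZY,
	};

	/* NMI-safe: irq_work_queue() is a cmpxchg claim plus an llist add */
	void wake_up_klogd(void)
	{
		preempt_disable();
		if (waitqueue_active(&log_wait))
			irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
		preempt_enable();
	}

In the common case (tick running), a printk() therefore pays only the claim and the list add; the wake-up happens from the next timer tick instead of an extra interrupt.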
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -12,22 +12,15 @@
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
 #include <linux/irqflags.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+#include <linux/cpu.h>
+#include <linux/notifier.h>
 #include <asm/processor.h>
 
-/*
- * An entry can be in one of four states:
- *
- * free	     NULL, 0 -> {claimed}       : free to be used
- * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
- * pending   next, 3 -> {busy}          : queued, pending callback
- * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
- */
-
-#define IRQ_WORK_PENDING	1UL
-#define IRQ_WORK_BUSY		2UL
-#define IRQ_WORK_FLAGS		3UL
 
 static DEFINE_PER_CPU(struct llist_head, irq_work_list);
+static DEFINE_PER_CPU(int, irq_work_raised);
 
 /*
  * Claim the entry so that no one else will poke at it.
@@ -70,8 +63,6 @@ void __weak arch_irq_work_raise(void)
  */
 void irq_work_queue(struct irq_work *work)
 {
-	bool empty;
-
 	/* Only queue if not already pending */
 	if (!irq_work_claim(work))
 		return;
@@ -79,30 +70,55 @@ void irq_work_queue(struct irq_work *work)
 	/* Queue the entry and raise the IPI if needed. */
 	preempt_disable();
 
-	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
-	/* The list was empty, raise self-interrupt to start processing. */
-	if (empty)
-		arch_irq_work_raise();
+	llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
+
+	/*
+	 * If the work is not "lazy" or the tick is stopped, raise the irq
+	 * work interrupt (if supported by the arch), otherwise, just wait
+	 * for the next tick.
+	 */
+	if (!(work->flags & IRQ_WORK_LAZY) || tick_nohz_tick_stopped()) {
+		if (!this_cpu_cmpxchg(irq_work_raised, 0, 1))
+			arch_irq_work_raise();
+	}
 
 	preempt_enable();
 }
 EXPORT_SYMBOL_GPL(irq_work_queue);
 
-/*
- * Run the irq_work entries on this cpu. Requires to be ran from hardirq
- * context with local IRQs disabled.
- */
-void irq_work_run(void)
+bool irq_work_needs_cpu(void)
 {
+	struct llist_head *this_list;
+
+	this_list = &__get_cpu_var(irq_work_list);
+	if (llist_empty(this_list))
+		return false;
+
+	/* All work should have been flushed before going offline */
+	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
+
+	return true;
+}
+
+static void __irq_work_run(void)
+{
+	unsigned long flags;
 	struct irq_work *work;
 	struct llist_head *this_list;
 	struct llist_node *llnode;
 
+	/*
+	 * Reset the "raised" state right before we check the list because
+	 * an NMI may enqueue after we find the list empty from the runner.
+	 */
+	__this_cpu_write(irq_work_raised, 0);
+	barrier();
+
 	this_list = &__get_cpu_var(irq_work_list);
 	if (llist_empty(this_list))
 		return;
 
-	BUG_ON(!in_irq());
 	BUG_ON(!irqs_disabled());
 
 	llnode = llist_del_all(this_list);
@@ -118,15 +134,27 @@ void irq_work_run(void)
 		 * to claim that work don't rely on us to handle their data
 		 * while we are in the middle of the func.
 		 */
-		xchg(&work->flags, IRQ_WORK_BUSY);
+		flags = work->flags & ~IRQ_WORK_PENDING;
+		xchg(&work->flags, flags);
 
 		work->func(work);
 		/*
 		 * Clear the BUSY bit and return to the free state if
 		 * no-one else claimed it meanwhile.
 		 */
-		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
+		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
 	}
 }
+
+/*
+ * Run the irq_work entries on this cpu. Requires to be ran from hardirq
+ * context with local IRQs disabled.
+ */
+void irq_work_run(void)
+{
+	BUG_ON(!in_irq());
+	__irq_work_run();
+}
 EXPORT_SYMBOL_GPL(irq_work_run);
 
 /*
@@ -141,3 +169,35 @@ void irq_work_sync(struct irq_work *work)
 		cpu_relax();
 }
 EXPORT_SYMBOL_GPL(irq_work_sync);
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int irq_work_cpu_notify(struct notifier_block *self,
+			       unsigned long action, void *hcpu)
+{
+	long cpu = (long)hcpu;
+
+	switch (action) {
+	case CPU_DYING:
+		/* Called from stop_machine */
+		if (WARN_ON_ONCE(cpu != smp_processor_id()))
+			break;
+		__irq_work_run();
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cpu_notify;
+
+static __init int irq_work_init_cpu_notifier(void)
+{
+	cpu_notify.notifier_call = irq_work_cpu_notify;
+	cpu_notify.priority = 0;
+	register_cpu_notifier(&cpu_notify);
+	return 0;
+}
+device_initcall(irq_work_init_cpu_notifier);
+
+#endif /* CONFIG_HOTPLUG_CPU */
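For the first bullet of the commit message, the counterpart of irq_work_needs_cpu() above lives in the nohz code: the tick must not be stopped while irq works are pending, since a lazy work may have raised no IPI at all. The following is a hedged sketch of that check in tick_nohz_stop_sched_tick(), assuming the tick-sched locals of that kernel (cpu, rcu_delta_jiffies, last_jiffies, next_jiffies, delta_jiffies) and its sibling helpers rcu_needs_cpu() and arch_needs_cpu():

	/* Keep the tick alive while this CPU still has pending callbacks */
	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) ||
	    arch_needs_cpu(cpu) || irq_work_needs_cpu()) {
		/* Behave as if the next event were only one jiffy away */
		next_jiffies = last_jiffies + 1;
		delta_jiffies = 1;
	}

This is the check that replaces the ad-hoc printk_needs_cpu() hook mentioned in the last bullet.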