hotplug: Prevent alloc/free of irq descriptors during cpu up/down
When a cpu goes up, some architectures (e.g. x86) have to walk the irq
space to set up the vector space for the cpu. While this needs extra
protection at the architecture level, we can avoid a few race conditions
by preventing the concurrent allocation/free of irq descriptors and the
associated data.

When a cpu goes down, it moves the interrupts which are targeted to this
cpu away by reassigning the affinities. While this happens, interrupts
can be allocated and freed, which opens a can of race conditions in the
code which reassigns the affinities, because interrupt descriptors might
be freed underneath.

Example:

CPU1                              CPU2
cpu_up/down
  irq_desc = irq_to_desc(irq);
                                  remove_from_radix_tree(desc);
  raw_spin_lock(&desc->lock);
                                  free(desc);

We could protect the irq descriptors with RCU, but that would require a
full tree change of all accesses to interrupt descriptors. Fortunately,
this kind of race condition is rather limited to a few things like cpu
hotplug; the normal setup/teardown is very well serialized. So the
simpler and obvious solution is:

Prevent allocation and freeing of interrupt descriptors across cpu
hotplug.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: xiao jin <jin.xiao@intel.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Borislav Petkov <bp@suse.de>
Cc: Yanmin Zhang <yanmin_zhang@linux.intel.com>
Link: http://lkml.kernel.org/r/20150705171102.063519515@linutronix.de
parent d770e558e2
commit a899418167
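For context, the diff excerpt below does not show the kernel/irq/irqdesc.c hunk where the new helpers are defined. A minimal sketch of their likely shape, assuming they simply wrap the sparse_irq_lock mutex that already guards the irq descriptor radix tree in that file:

	#include <linux/mutex.h>

	/* Already present in kernel/irq/irqdesc.c; shown here only to
	 * make the sketch self-contained. */
	static DEFINE_MUTEX(sparse_irq_lock);

	/* New wrappers: let the hotplug code hold off descriptor
	 * allocation and freeing for the duration of cpu up/down. */
	void irq_lock_sparse(void)
	{
		mutex_lock(&sparse_irq_lock);
	}

	void irq_unlock_sparse(void)
	{
		mutex_unlock(&sparse_irq_lock);
	}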
include/linux/irqdesc.h
@@ -87,7 +87,12 @@ struct irq_desc {
 	const char		*name;
 } ____cacheline_internodealigned_in_smp;
 
-#ifndef CONFIG_SPARSE_IRQ
+#ifdef CONFIG_SPARSE_IRQ
+extern void irq_lock_sparse(void);
+extern void irq_unlock_sparse(void);
+#else
+static inline void irq_lock_sparse(void) { }
+static inline void irq_unlock_sparse(void) { }
 extern struct irq_desc irq_desc[NR_IRQS];
 #endif
 
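Why holding this mutex suffices: the descriptor free path in kernel/irq/irqdesc.c takes sparse_irq_lock around the radix-tree removal, so while a cpu is coming up or going down with irq_lock_sparse() held, the free() from the changelog's example cannot run. A simplified sketch of that free path, modeled loosely on the contemporaneous code (unregister_irq_proc() and other teardown details elided):

	#include <linux/irq.h>
	#include <linux/slab.h>

	/* Simplified: removal from the radix tree is serialized against
	 * cpu hotplug by sparse_irq_lock. A hotplug path holding the
	 * lock either finds the descriptor fully alive or does not find
	 * it at all; it can never look up a descriptor that is about to
	 * be freed underneath it. */
	static void free_desc(unsigned int irq)
	{
		struct irq_desc *desc = irq_to_desc(irq);

		mutex_lock(&sparse_irq_lock);
		delete_irq_desc(irq);	/* radix tree removal */
		mutex_unlock(&sparse_irq_lock);

		free_percpu(desc->kstat_irqs);
		kfree(desc);
	}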
kernel/cpu.c (22 lines changed)
@@ -21,6 +21,7 @@
 #include <linux/suspend.h>
 #include <linux/lockdep.h>
 #include <linux/tick.h>
+#include <linux/irq.h>
 #include <trace/events/power.h>
 
 #include "smpboot.h"
@@ -391,14 +392,20 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 
 	smpboot_park_threads(cpu);
 
+	/*
+	 * Prevent irq alloc/free while the dying cpu reorganizes the
+	 * interrupt affinities.
+	 */
+	irq_lock_sparse();
+
 	/*
 	 * So now all preempt/rcu users must observe !cpu_active().
 	 */
-
 	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
 	if (err) {
 		/* CPU didn't die: tell everyone. Can't complain. */
 		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
+		irq_unlock_sparse();
 		goto out_release;
 	}
 	BUG_ON(cpu_online(cpu));
@@ -415,6 +422,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
 	per_cpu(cpu_dead_idle, cpu) = false;
 
+	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
+	irq_unlock_sparse();
+
 	hotplug_cpu__broadcast_tick_pull(cpu);
 	/* This actually kills the CPU. */
 	__cpu_die(cpu);
@@ -517,8 +527,18 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen)
 		goto out_notify;
 	}
 
+	/*
+	 * Some architectures have to walk the irq descriptors to
+	 * setup the vector space for the cpu which comes online.
+	 * Prevent irq alloc/free across the bringup.
+	 */
+	irq_lock_sparse();
+
 	/* Arch-specific enabling code. */
 	ret = __cpu_up(cpu, idle);
+
+	irq_unlock_sparse();
+
 	if (ret != 0)
 		goto out_notify;
 	BUG_ON(!cpu_online(cpu));
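The walk that the new comment in _cpu_up() refers to is, on x86, the population of the incoming cpu's vector_irq[] table, which iterates over every active interrupt. An illustrative sketch of that shape (the function name is hypothetical; the body is modeled on the x86 __setup_vector_irq() of this period, simplified):

	/* Hypothetical name; cf. x86 __setup_vector_irq(). The loop over
	 * the active irqs is exactly what irq_lock_sparse() in _cpu_up()
	 * protects: no descriptor can be allocated or freed while the
	 * incoming cpu's vector table is being filled in. */
	static void setup_vectors_for_cpu(int cpu)
	{
		unsigned int irq;

		for_each_active_irq(irq) {
			struct irq_cfg *cfg = irq_cfg(irq);

			/* Skip irqs not routed to this cpu. */
			if (!cfg || !cpumask_test_cpu(cpu, cfg->domain))
				continue;
			per_cpu(vector_irq, cpu)[cfg->vector] = irq;
		}
	}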
kernel/irq/internals.h
@@ -76,12 +76,8 @@ extern void unmask_threaded_irq(struct irq_desc *desc);
 
 #ifdef CONFIG_SPARSE_IRQ
 static inline void irq_mark_irq(unsigned int irq) { }
-extern void irq_lock_sparse(void);
-extern void irq_unlock_sparse(void);
 #else
 extern void irq_mark_irq(unsigned int irq);
-static inline void irq_lock_sparse(void) { }
-static inline void irq_unlock_sparse(void) { }
 #endif
 
 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);