Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  generic-ipi: use per cpu data for single cpu ipi calls
  cpumask: convert lib/smp_processor_id to new cpumask ops
  signals, debug: fix BUG: using smp_processor_id() in preemptible code in print_fatal_signal()
commit 1347e965f5
kernel/signal.c
@@ -909,7 +909,9 @@ static void print_fatal_signal(struct pt_regs *regs, int signr)
 	}
 #endif
 	printk("\n");
+	preempt_disable();
 	show_regs(regs);
+	preempt_enable();
 }
 
 static int __init setup_print_fatal_signals(char *str)
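The kernel/signal.c change above addresses the "BUG: using smp_processor_id() in preemptible code" warning that CONFIG_DEBUG_PREEMPT raises when the current CPU id is read from a preemptible context; show_regs() relies on the current CPU id, so the fix pins the task to one CPU for the duration of the register dump. A minimal kernel-style sketch of the same pattern (the helper name dump_regs_on_this_cpu is hypothetical, not part of this commit):

	static void dump_regs_on_this_cpu(struct pt_regs *regs)
	{
		preempt_disable();	/* the current CPU cannot change while preemption is off */
		pr_info("dumping regs on CPU %d\n", smp_processor_id());
		show_regs(regs);	/* may itself read the current CPU id */
		preempt_enable();	/* let the scheduler migrate us again */
	}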
36	kernel/smp.c
@@ -18,6 +18,7 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock);
 enum {
 	CSD_FLAG_WAIT = 0x01,
 	CSD_FLAG_ALLOC = 0x02,
+	CSD_FLAG_LOCK = 0x04,
 };
 
 struct call_function_data {
@@ -186,6 +187,9 @@ void generic_smp_call_function_single_interrupt(void)
 		if (data_flags & CSD_FLAG_WAIT) {
 			smp_wmb();
 			data->flags &= ~CSD_FLAG_WAIT;
+		} else if (data_flags & CSD_FLAG_LOCK) {
+			smp_wmb();
+			data->flags &= ~CSD_FLAG_LOCK;
 		} else if (data_flags & CSD_FLAG_ALLOC)
 			kfree(data);
 	}
@@ -196,6 +200,8 @@ void generic_smp_call_function_single_interrupt(void)
 	}
 }
 
+static DEFINE_PER_CPU(struct call_single_data, csd_data);
+
 /*
  * smp_call_function_single - Run a function on a specific CPU
  * @func: The function to run. This must be fast and non-blocking.
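The hunk above introduces the fallback slot itself: DEFINE_PER_CPU(struct call_single_data, csd_data) creates one call_single_data instance per possible CPU, and per_cpu(csd_data, cpu) later selects the copy belonging to a given CPU. A rough kernel-style sketch of how such a slot is reached (the helper inspect_my_csd_slot is hypothetical):

	static void inspect_my_csd_slot(void)
	{
		int me = get_cpu();	/* disable preemption, return this CPU's id */
		struct call_single_data *data = &per_cpu(csd_data, me);

		pr_info("CPU %d csd_data flags: %u\n", me, data->flags);
		put_cpu();		/* re-enable preemption */
	}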
@@ -224,14 +230,38 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 		func(info);
 		local_irq_restore(flags);
 	} else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
-		struct call_single_data *data = NULL;
+		struct call_single_data *data;
 
 		if (!wait) {
+			/*
+			 * We are calling a function on a single CPU
+			 * and we are not going to wait for it to finish.
+			 * We first try to allocate the data, but if we
+			 * fail, we fall back to use a per cpu data to pass
+			 * the information to that CPU. Since all callers
+			 * of this code will use the same data, we must
+			 * synchronize the callers to prevent a new caller
+			 * from corrupting the data before the callee
+			 * can access it.
+			 *
+			 * The CSD_FLAG_LOCK is used to let us know when
+			 * the IPI handler is done with the data.
+			 * The first caller will set it, and the callee
+			 * will clear it. The next caller must wait for
+			 * it to clear before we set it again. This
+			 * will make sure the callee is done with the
+			 * data before a new caller will use it.
+			 */
 			data = kmalloc(sizeof(*data), GFP_ATOMIC);
 			if (data)
 				data->flags = CSD_FLAG_ALLOC;
-		}
-		if (!data) {
+			else {
+				data = &per_cpu(csd_data, me);
+				while (data->flags & CSD_FLAG_LOCK)
+					cpu_relax();
+				data->flags = CSD_FLAG_LOCK;
+			}
+		} else {
 			data = &d;
 			data->flags = CSD_FLAG_WAIT;
 		}
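The long comment in the hunk above describes a simple ownership handshake on the shared per-CPU slot: a caller that cannot kmalloc() spins until CSD_FLAG_LOCK is clear, fills in the slot, and sets the flag; the IPI handler clears the flag once it has consumed the data. Below is a stand-alone userspace model of that handshake in plain C11 atomics and pthreads; all names (csd_slot, queue_single_call, fake_ipi_handler) are invented for illustration and this is not the kernel code itself:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	#define CSD_FLAG_LOCK 0x04

	struct csd_slot {
		atomic_uint flags;		/* models call_single_data.flags */
		void (*func)(void *);
		void *info;
	};

	static struct csd_slot slot;		/* models per_cpu(csd_data, me) */

	/* caller side: wait for the previous user, publish the work, lock the slot */
	static void queue_single_call(void (*func)(void *), void *info)
	{
		while (atomic_load(&slot.flags) & CSD_FLAG_LOCK)
			;			/* the kernel uses cpu_relax() here */
		slot.func = func;
		slot.info = info;
		atomic_store(&slot.flags, CSD_FLAG_LOCK);
	}

	/* "IPI handler" side: consume the work, then release the slot */
	static void *fake_ipi_handler(void *unused)
	{
		(void)unused;
		while (!(atomic_load(&slot.flags) & CSD_FLAG_LOCK))
			;
		slot.func(slot.info);
		atomic_fetch_and(&slot.flags, ~CSD_FLAG_LOCK);	/* like data->flags &= ~CSD_FLAG_LOCK */
		return NULL;
	}

	static void say_hello(void *msg)
	{
		puts(msg);
	}

	int main(void)
	{
		pthread_t handler;

		pthread_create(&handler, NULL, fake_ipi_handler, NULL);
		queue_single_call(say_hello, "hello from the modelled IPI");
		pthread_join(handler, NULL);
		return 0;
	}

The sequentially consistent atomics stand in for the kernel's smp_wmb()/cpu_relax() pairing; the point is only that the shared slot is never reused before the consumer has released it.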
lib/smp_processor_id.c
@@ -22,7 +22,7 @@ notrace unsigned int debug_smp_processor_id(void)
 	 * Kernel threads bound to a single CPU can safely use
 	 * smp_processor_id():
 	 */
-	if (cpus_equal(current->cpus_allowed, cpumask_of_cpu(this_cpu)))
+	if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu)))
 		goto out;
 
 	/*
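The lib/smp_processor_id.c hunk is the "convert to new cpumask ops" part of the merge: cpus_equal()/cpumask_of_cpu() work on cpumask_t values, which are copied around and can be large when NR_CPUS is big, while cpumask_equal()/cpumask_of() operate on const struct cpumask pointers. A small kernel-style sketch of the new calls in isolation (the helper name task_bound_to_one_cpu is hypothetical):

	/* true if @p may only run on @cpu, the same test debug_smp_processor_id() makes */
	static bool task_bound_to_one_cpu(struct task_struct *p, int cpu)
	{
		/* cpumask_of(cpu) yields a const struct cpumask * with only 'cpu' set */
		return cpumask_equal(&p->cpus_allowed, cpumask_of(cpu));
	}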