mirror of
https://github.com/torvalds/linux.git
synced 2024-11-01 17:51:43 +00:00
c46fff2a3b
The name __smp_call_function_single() doesn't tell much about the properties of this function, especially when compared to smp_call_function_single(). The comments above the implementation are also misleading. The main point of this function is actually not to be able to embed the csd in an object. That is actually a requirement that results from the purpose of this function, which is to raise an IPI asynchronously. As such it can be called with interrupts disabled. And this feature comes at the cost of the caller, who then needs to serialize the IPIs on this csd. Let's rename the function and enhance the comments so that they reflect these properties. Suggested-by: Christoph Hellwig <hch@infradead.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Christoph Hellwig <hch@infradead.org> Cc: Ingo Molnar <mingo@kernel.org> Cc: Jan Kara <jack@suse.cz> Cc: Jens Axboe <axboe@fb.com> Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Signed-off-by: Jens Axboe <axboe@fb.com>
85 lines
1.7 KiB
C
85 lines
1.7 KiB
C
/*
|
|
* Uniprocessor-only support functions. The counterpart to kernel/smp.c
|
|
*/
|
|
|
|
#include <linux/interrupt.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/export.h>
|
|
#include <linux/smp.h>
|
|
|
|
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
|
|
int wait)
|
|
{
|
|
unsigned long flags;
|
|
|
|
WARN_ON(cpu != 0);
|
|
|
|
local_irq_save(flags);
|
|
func(info);
|
|
local_irq_restore(flags);
|
|
|
|
return 0;
|
|
}
|
|
EXPORT_SYMBOL(smp_call_function_single);
|
|
|
|
int smp_call_function_single_async(int cpu, struct call_single_data *csd)
|
|
{
|
|
unsigned long flags;
|
|
|
|
local_irq_save(flags);
|
|
csd->func(csd->info);
|
|
local_irq_restore(flags);
|
|
return 0;
|
|
}
|
|
EXPORT_SYMBOL(smp_call_function_single_async);
|
|
|
|
/*
 * on_each_cpu - Run @func(@info) on every CPU.
 *
 * With a single CPU this degenerates to one local call, executed with
 * interrupts disabled. @wait is meaningless here and ignored.
 *
 * Always returns 0.
 */
int on_each_cpu(smp_call_func_t func, void *info, int wait)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	func(info);
	local_irq_restore(irqflags);

	return 0;
}
EXPORT_SYMBOL(on_each_cpu);
|
|
|
|
/*
|
|
* Note we still need to test the mask even for UP
|
|
* because we actually can get an empty mask from
|
|
* code that on SMP might call us without the local
|
|
* CPU in the mask.
|
|
*/
|
|
void on_each_cpu_mask(const struct cpumask *mask,
|
|
smp_call_func_t func, void *info, bool wait)
|
|
{
|
|
unsigned long flags;
|
|
|
|
if (cpumask_test_cpu(0, mask)) {
|
|
local_irq_save(flags);
|
|
func(info);
|
|
local_irq_restore(flags);
|
|
}
|
|
}
|
|
EXPORT_SYMBOL(on_each_cpu_mask);
|
|
|
|
/*
|
|
* Preemption is disabled here to make sure the cond_func is called under the
|
|
* same condtions in UP and SMP.
|
|
*/
|
|
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
|
|
smp_call_func_t func, void *info, bool wait,
|
|
gfp_t gfp_flags)
|
|
{
|
|
unsigned long flags;
|
|
|
|
preempt_disable();
|
|
if (cond_func(0, info)) {
|
|
local_irq_save(flags);
|
|
func(info);
|
|
local_irq_restore(flags);
|
|
}
|
|
preempt_enable();
|
|
}
|
|
EXPORT_SYMBOL(on_each_cpu_cond);
|