mirror of https://github.com/torvalds/linux.git
ring-buffer: Replace this_cpu_*() with __this_cpu_*()
It has come to my attention that this_cpu_read/write are horrible on
architectures other than x86. Worse yet, they actually disable preemption
or interrupts! This caused some unexpected tracing results on ARM.

   101.356868: preempt_count_add <-ring_buffer_lock_reserve
   101.356870: preempt_count_sub <-ring_buffer_lock_reserve

ring_buffer_lock_reserve() has recursion protection that requires accessing
a per-cpu variable. But since preempt_disable() is traced, it too got traced
while accessing the variable that is supposed to prevent recursion like this.

The generic versions of this_cpu_read() and write() are:

 #define this_cpu_generic_read(pcp)                                    \
 ({     typeof(pcp) ret__;                                             \
        preempt_disable();                                             \
        ret__ = *this_cpu_ptr(&(pcp));                                 \
        preempt_enable();                                              \
        ret__;                                                         \
 })

 #define this_cpu_generic_to_op(pcp, val, op)                          \
 do {                                                                  \
        unsigned long flags;                                           \
        raw_local_irq_save(flags);                                     \
        *__this_cpu_ptr(&(pcp)) op val;                                \
        raw_local_irq_restore(flags);                                  \
 } while (0)

That is unacceptable for locations that already know they are within a
preempt-disabled or interrupt-disabled section.

Paul McKenney stated that the __this_cpu_*() versions produce much better
code on architectures other than x86 than this_cpu_*() does, if we know
that the call is made in a preempt-disabled location.

I also changed trace_recursive_unlock() to use a local variable instead of
accessing the per-cpu variable twice.

Link: http://lkml.kernel.org/r/20150317114411.GE3589@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/20150317104038.312e73d1@gandalf.local.home

Cc: stable@vger.kernel.org
Acked-by: Christoph Lameter <cl@linux.com>
Reported-by: Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
Tested-by: Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
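For illustration only (not part of the commit), the kernel-style sketch below
shows the pattern the patch relies on: when the caller has already disabled
preemption, the lockless __this_cpu_*() accessors can be used directly,
avoiding the traced preempt_disable()/raw_local_irq_save() pair added by the
generic this_cpu_*() fallbacks quoted above. The per-cpu variable my_counter
and the helper my_update_counter() are hypothetical names, not symbols from
the kernel source.

 #include <linux/percpu.h>
 #include <linux/preempt.h>

 /* Hypothetical per-cpu variable, analogous to current_context. */
 static DEFINE_PER_CPU(unsigned int, my_counter);

 /*
  * Hypothetical helper; the caller is assumed to have disabled preemption
  * already (as ring_buffer_lock_reserve() does before calling the
  * recursion-protection helpers), so the CPU cannot change between the
  * read and the write below.
  */
 static void my_update_counter(void)
 {
        unsigned int val = __this_cpu_read(my_counter);  /* no implicit preempt_disable() */

        __this_cpu_write(my_counter, val + 1);           /* no implicit irq save/restore */
 }

On x86 both forms typically compile to a single segment-relative access; the
difference matters for architectures that fall back to the generic macros.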
commit 80a9b64e2c
parent 06e5801b8c
@@ -2679,7 +2679,7 @@ static DEFINE_PER_CPU(unsigned int, current_context);
 
 static __always_inline int trace_recursive_lock(void)
 {
-	unsigned int val = this_cpu_read(current_context);
+	unsigned int val = __this_cpu_read(current_context);
 	int bit;
 
 	if (in_interrupt()) {
@@ -2696,18 +2696,17 @@ static __always_inline int trace_recursive_lock(void)
 		return 1;
 
 	val |= (1 << bit);
-	this_cpu_write(current_context, val);
+	__this_cpu_write(current_context, val);
 
 	return 0;
 }
 
 static __always_inline void trace_recursive_unlock(void)
 {
-	unsigned int val = this_cpu_read(current_context);
+	unsigned int val = __this_cpu_read(current_context);
 
-	val--;
-	val &= this_cpu_read(current_context);
-	this_cpu_write(current_context, val);
+	val &= val & (val - 1);
+	__this_cpu_write(current_context, val);
 }
 
 #else
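As a side note (not from the commit itself), the new trace_recursive_unlock()
relies on the classic val & (val - 1) trick, which clears the lowest set bit.
In this version of ring_buffer.c the recursion bits are assigned so that more
deeply nested contexts (NMI, IRQ, softirq) use lower bit numbers than normal
context, so clearing the lowest set bit drops exactly the level being
released. A small standalone sketch, compilable as ordinary user-space C:

 /* Standalone demonstration of the bit trick; not kernel code. */
 #include <stdio.h>

 int main(void)
 {
        unsigned int val = (1 << 3) | (1 << 2); /* normal context, softirq nested inside */

        /* Same expression as the patched trace_recursive_unlock(). */
        val &= val & (val - 1);

        printf("0x%x\n", val);  /* prints 0x8: the softirq bit (lowest set bit) is cleared */

        return 0;
 }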