forked from Minki/linux
mips/mmiowb: Add unconditional mmiowb() to arch_spin_unlock()
The mmiowb() macro is horribly difficult to use and drivers will continue to work most of the time if they omit a call when it is required.

Rather than rely on driver authors getting this right, push mmiowb() into arch_spin_unlock() for mips. If this is deemed to be a performance issue, a subsequent optimisation could make use of ARCH_HAS_MMIOWB to elide the barrier in cases where no I/O writes were performed inside the critical section.

Acked-by: Paul Burton <paul.burton@mips.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
This commit is contained in:
parent
e9e8543fec
commit
346e91ee09
@ -12,7 +12,6 @@ generic-y += irq_work.h
|
||||
generic-y += local64.h
|
||||
generic-y += mcs_spinlock.h
|
||||
generic-y += mm-arch-hooks.h
|
||||
generic-y += mmiowb.h
|
||||
generic-y += msi.h
|
||||
generic-y += parport.h
|
||||
generic-y += percpu.h
|
||||
|
@ -102,9 +102,6 @@ static inline void set_io_port_base(unsigned long base)
|
||||
#define iobarrier_w() wmb()
|
||||
#define iobarrier_sync() iob()
|
||||
|
||||
/* Some callers use this older API instead. */
|
||||
#define mmiowb() iobarrier_w()
|
||||
|
||||
/*
|
||||
* virt_to_phys - map virtual addresses to physical
|
||||
* @address: address to remap
|
||||
|
11
arch/mips/include/asm/mmiowb.h
Normal file
11
arch/mips/include/asm/mmiowb.h
Normal file
@ -0,0 +1,11 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_MMIOWB_H
#define _ASM_MMIOWB_H

/* Needed for iobarrier_w(), which the asm/io.h in this tree defines. */
#include <asm/io.h>

/*
 * mmiowb() orders MMIO writes issued inside a spinlock-protected critical
 * section before the lock release, so the next lock holder (possibly on
 * another CPU) observes them in order.  On MIPS this is an I/O write
 * barrier; asm-generic/mmiowb.h picks this definition up below.
 */
#define mmiowb() iobarrier_w()

/* Must come after the mmiowb() definition so the generic code uses it. */
#include <asm-generic/mmiowb.h>

#endif /* _ASM_MMIOWB_H */
|
@ -11,6 +11,21 @@
|
||||
|
||||
#include <asm/processor.h>
|
||||
#include <asm/qrwlock.h>
|
||||
|
||||
#include <asm-generic/qspinlock_types.h>
|
||||
|
||||
/*
 * Override the asm-generic queued_spin_unlock() so that MIPS can insert
 * an unconditional I/O write barrier into the unlock path (see commit
 * message: drivers frequently forget the required explicit mmiowb()).
 */
#define queued_spin_unlock queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 *
 * The mmiowb() MUST precede the release store: it orders any MMIO
 * writes performed inside the critical section before the lock word is
 * cleared, so a subsequent lock holder on another CPU cannot observe
 * its own MMIO writes overtaking those of the previous holder.
 */
static inline void queued_spin_unlock(struct qspinlock *lock)
{
	/* This could be optimised with ARCH_HAS_MMIOWB */
	mmiowb();
	smp_store_release(&lock->locked, 0);
}
|
||||
|
||||
#include <asm/qspinlock.h>
|
||||
|
||||
#endif /* _ASM_SPINLOCK_H */
|
||||
|
Loading…
Reference in New Issue
Block a user