riscv/mmiowb: Hook up mmiowb() implementation to asm-generic code

In a bid to kill off explicit mmiowb() usage in driver code, hook up
the asm-generic mmiowb() tracking code for riscv, so that an mmiowb()
is automatically issued from spin_unlock() if an I/O write was performed
in the critical section.
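For illustration, the driver-visible effect looks roughly like this (a minimal sketch; the lock, register base, and offset names are hypothetical, not taken from the patch):

	/* Before: the MMIO write had to be ordered against the unlock by hand. */
	spin_lock(&dev->lock);
	writel(val, dev->regs + CTRL_REG);
	mmiowb();	/* explicit barrier, easy to forget */
	spin_unlock(&dev->lock);

	/* After: writel() marks an I/O write as pending via __io_aw(), and
	 * spin_unlock() issues mmiowb() itself if one is pending, so the
	 * explicit call can simply be deleted.
	 */
	spin_lock(&dev->lock);
	writel(val, dev->regs + CTRL_REG);
	spin_unlock(&dev->lock);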

Reviewed-by: Palmer Dabbelt <palmer@sifive.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
commit b012980d1c
parent 420af15547
Author: Will Deacon <will.deacon@arm.com>
Date:   2019-02-22 14:45:42 +00:00
4 changed files with 17 additions and 14 deletions

diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig

@@ -48,6 +48,7 @@ config RISCV
 	select RISCV_TIMER
 	select GENERIC_IRQ_MULTI_HANDLER
 	select ARCH_HAS_PTE_SPECIAL
+	select ARCH_HAS_MMIOWB
 	select HAVE_EBPF_JIT if 64BIT
 
 config MMU
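Selecting ARCH_HAS_MMIOWB is what opts the architecture into the generic tracking; architectures that do not select it pay no cost, because the hooks compile away. Roughly, the fallback in include/asm-generic/mmiowb.h looks like this (a sketch from memory, not part of this patch):

	/* Without ARCH_HAS_MMIOWB (and thus CONFIG_MMIOWB), the generic
	 * hooks are empty statements.
	 */
	#define mmiowb_set_pending()	do { } while (0)
	#define mmiowb_spin_lock()	do { } while (0)
	#define mmiowb_spin_unlock()	do { } while (0)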

diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild

@@ -21,7 +21,6 @@ generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
 generic-y += mutex.h
 generic-y += percpu.h
 generic-y += preempt.h

diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h

@@ -20,6 +20,7 @@
 #define _ASM_RISCV_IO_H
 
 #include <linux/types.h>
+#include <asm/mmiowb.h>
 
 extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
@@ -99,18 +100,6 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 }
 #endif
 
-/*
- * FIXME: I'm flip-flopping on whether or not we should keep this or enforce
- * the ordering with I/O on spinlocks like PowerPC does. The worry is that
- * drivers won't get this correct, but I also don't want to introduce a fence
- * into the lock code that otherwise only uses AMOs (and is essentially defined
- * by the ISA to be correct). For now I'm leaving this here: "o,w" is
- * sufficient to ensure that all writes to the device have completed before the
- * write to the spinlock is allowed to commit. I surmised this from reading
- * "ACQUIRES VS I/O ACCESSES" in memory-barriers.txt.
- */
-#define mmiowb() __asm__ __volatile__ ("fence o,w" : : : "memory");
-
 /*
  * Unordered I/O memory access primitives. These are even more relaxed than
  * the relaxed versions, as they don't even order accesses between successive
@@ -165,7 +154,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 #define __io_br()	do {} while (0)
 #define __io_ar(v)	__asm__ __volatile__ ("fence i,r" : : : "memory");
 #define __io_bw()	__asm__ __volatile__ ("fence w,o" : : : "memory");
-#define __io_aw()	do {} while (0)
+#define __io_aw()	mmiowb_set_pending()
 
 #define readb(c)	({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
 #define readw(c)	({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; })
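The read accessors quoted above only use __io_br()/__io_ar(); __io_aw() matters for the write accessors defined further down in the same header, which bracket each store with __io_bw() before it and __io_aw() after it, roughly (quoted from memory, so treat as a sketch):

	#define writeb(v, c)	({ __io_bw(); writeb_cpu((v), (c)); __io_aw(); })
	#define writew(v, c)	({ __io_bw(); writew_cpu((v), (c)); __io_aw(); })
	#define writel(v, c)	({ __io_bw(); writel_cpu((v), (c)); __io_aw(); })

With __io_aw() redefined, every writeX() now records a pending barrier instead of doing nothing.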

diff --git a/arch/riscv/include/asm/mmiowb.h b/arch/riscv/include/asm/mmiowb.h
new file mode 100644

@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_MMIOWB_H
+#define _ASM_RISCV_MMIOWB_H
+
+/*
+ * "o,w" is sufficient to ensure that all writes to the device have completed
+ * before the write to the spinlock is allowed to commit.
+ */
+#define mmiowb() __asm__ __volatile__ ("fence o,w" : : : "memory");
+
+#include <asm-generic/mmiowb.h>
+
+#endif /* ASM_RISCV_MMIOWB_H */
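Including asm-generic/mmiowb.h after defining mmiowb() is what wires the fence into the lock code: the generic header keeps a small per-CPU state that __io_aw() marks via mmiowb_set_pending(), and that the spinlock code checks on unlock. In outline (a simplified sketch of the generic helpers; the real code also tracks lock nesting so that I/O done outside any critical section is ignored):

	struct mmiowb_state {
		u16	nesting_count;	/* depth of nested spinlocks */
		u16	mmiowb_pending;	/* set when an I/O write occurred */
	};

	static inline void mmiowb_set_pending(void)
	{
		__mmiowb_state()->mmiowb_pending = 1;	/* called from __io_aw() */
	}

	static inline void mmiowb_spin_unlock(void)
	{
		struct mmiowb_state *ms = __mmiowb_state();

		if (ms->mmiowb_pending) {
			ms->mmiowb_pending = 0;
			mmiowb();	/* the "fence o,w" defined above */
		}
		ms->nesting_count--;
	}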