riscv: fix build break after macro-to-function conversion in generic cacheflush.h

Commit c296d4dc13 ("asm-generic: fix a compilation warning")
converted the various flush_*cache_* macros in
asm-generic/cacheflush.h to static inline functions.  This breaks
RISC-V builds, since RISC-V's cacheflush.h includes the generic
cacheflush.h and then undefines the macros it intends to override.
Now that those are functions rather than macros, the #undefs no longer
remove them, and the RISC-V definitions conflict with the generic ones.
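
To illustrate the failure mode, here is a simplified sketch of the old
pattern (not the literal header contents; the RISC-V override body is
elided):

  /* asm-generic/cacheflush.h after c296d4dc13: a function, not a macro */
  static inline void flush_dcache_page(struct page *page)
  {
  }

  /* arch/riscv/include/asm/cacheflush.h, before this patch */
  #include <asm-generic/cacheflush.h>

  #undef flush_dcache_page   /* no-op: there is no macro left to remove */

  static inline void flush_dcache_page(struct page *page)
  {
          /* RISC-V-specific body (elided) */
  }
  /* error: redefinition of 'flush_dcache_page' */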

Fix by copying the subset of no-op functions that RISC-V reuses from
the generic cacheflush.h into the RISC-V cacheflush.h, and dropping
the include of the generic header.

Fixes: c296d4dc13 ("asm-generic: fix a compilation warning")
Signed-off-by: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Qian Cai <cai@lca.pw>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>

Paul Walmsley, 2019-07-17 13:41:51 -07:00
parent 0f327f2aaa
commit 2d69fbf3d0

@@ -6,11 +6,66 @@
 #ifndef _ASM_RISCV_CACHEFLUSH_H
 #define _ASM_RISCV_CACHEFLUSH_H
 
-#include <asm-generic/cacheflush.h>
-
-#undef flush_icache_range
-#undef flush_icache_user_range
-#undef flush_dcache_page
+#include <linux/mm.h>
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+
+/*
+ * The cache doesn't need to be flushed when TLB entries change when
+ * the cache is mapped to physical memory, not virtual memory
+ */
+static inline void flush_cache_all(void)
+{
+}
+
+static inline void flush_cache_mm(struct mm_struct *mm)
+{
+}
+
+static inline void flush_cache_dup_mm(struct mm_struct *mm)
+{
+}
+
+static inline void flush_cache_range(struct vm_area_struct *vma,
+                                     unsigned long start,
+                                     unsigned long end)
+{
+}
+
+static inline void flush_cache_page(struct vm_area_struct *vma,
+                                    unsigned long vmaddr,
+                                    unsigned long pfn)
+{
+}
+
+static inline void flush_dcache_mmap_lock(struct address_space *mapping)
+{
+}
+
+static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
+{
+}
+
+static inline void flush_icache_page(struct vm_area_struct *vma,
+                                     struct page *page)
+{
+}
+
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+}
+
+static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+}
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+        do { \
+                memcpy(dst, src, len); \
+                flush_icache_user_range(vma, page, vaddr, len); \
+        } while (0)
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+        memcpy(dst, src, len)
 
 static inline void local_flush_icache_all(void)
 {