mirror of
https://github.com/torvalds/linux.git
synced 2024-11-25 05:32:00 +00:00
7a92fc8b4d
The pcpu setup when using the page allocator sets up a new vmalloc mapping very early in the boot process, so early that it cannot use the flush_cache_vmap() function which may depend on structures not yet initialized (for example in riscv, we currently send an IPI to flush other cpus TLB). But on some architectures, we must call flush_cache_vmap(): for example, in riscv, some uarchs can cache invalid TLB entries so we need to flush the new established mapping to avoid taking an exception. So fix this by introducing a new function flush_cache_vmap_early() which is called right after setting the new page table entry and before accessing this new mapping. This new function implements a local flush tlb on riscv and is no-op for other architectures (same as today). Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com> Acked-by: Geert Uytterhoeven <geert@linux-m68k.org> Signed-off-by: Dennis Zhou <dennis@kernel.org>
128 lines
2.6 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_CACHEFLUSH_H
#define _ASM_GENERIC_CACHEFLUSH_H

#include <linux/instrumented.h>

struct mm_struct;
struct vm_area_struct;
struct page;
struct address_space;

/*
 * The cache doesn't need to be flushed when TLB entries change when
 * the cache is mapped to physical memory, not virtual memory
 */
/* Default no-op; an arch overrides by defining the macro before this header. */
#ifndef flush_cache_all
static inline void flush_cache_all(void)
{
}
#endif
/* Default no-op for address-space-wide cache flush on an mm teardown/switch. */
#ifndef flush_cache_mm
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
#endif
/* Default no-op; called when an mm is duplicated (fork) on arches that need it. */
#ifndef flush_cache_dup_mm
static inline void flush_cache_dup_mm(struct mm_struct *mm)
{
}
#endif
/* Default no-op flush of a user VA range [start, end) within @vma. */
#ifndef flush_cache_range
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
}
#endif
/* Default no-op flush of a single user page at @vmaddr backed by @pfn. */
#ifndef flush_cache_page
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr,
				    unsigned long pfn)
{
}
#endif
/*
 * Default no-op d-cache flush for a page-cache page.  The 0 value of
 * ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE advertises to generic code that this
 * arch does not implement a real flush here.
 */
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
static inline void flush_dcache_page(struct page *page)
{
}

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#endif
/* Default no-op lock taken around d-cache aliasing updates for @mapping. */
#ifndef flush_dcache_mmap_lock
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}
#endif
/* Default no-op counterpart to flush_dcache_mmap_lock(). */
#ifndef flush_dcache_mmap_unlock
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}
#endif
/* Default no-op i-cache flush over the kernel VA range [start, end). */
#ifndef flush_icache_range
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
}
#endif
/* By default the user-range variant is the same operation as the kernel one. */
#ifndef flush_icache_user_range
#define flush_icache_user_range flush_icache_range
#endif
/* Default no-op i-cache flush after writing @len bytes into a user page. */
#ifndef flush_icache_user_page
static inline void flush_icache_user_page(struct vm_area_struct *vma,
					  struct page *page,
					  unsigned long addr, int len)
{
}
#endif
/* Default no-op flush after establishing a vmalloc/vmap mapping. */
#ifndef flush_cache_vmap
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}
#endif
/*
 * Early-boot variant of flush_cache_vmap(): callable before SMP/IPI
 * infrastructure is up (e.g. the percpu first-chunk setup); must only
 * perform local flushes.  No-op by default.
 */
#ifndef flush_cache_vmap_early
static inline void flush_cache_vmap_early(unsigned long start, unsigned long end)
{
}
#endif
/* Default no-op flush before tearing down a vmalloc/vmap mapping. */
#ifndef flush_cache_vunmap
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
#endif
/*
 * Copy into a user page (e.g. ptrace poke) and keep the i-cache coherent
 * for the written range.  Multi-statement macro wrapped in do { } while (0).
 */
#ifndef copy_to_user_page
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do { \
		instrument_copy_to_user((void __user *)dst, src, len); \
		memcpy(dst, src, len); \
		flush_icache_user_page(vma, page, vaddr, len); \
	} while (0)
#endif
/*
 * Copy out of a user page (e.g. ptrace peek); no cache maintenance is
 * needed by default, only KMSAN/KASAN instrumentation around the memcpy.
 */
#ifndef copy_from_user_page
#define copy_from_user_page(vma, page, vaddr, dst, src, len)	  \
	do {							  \
		instrument_copy_from_user_before(dst, (void __user *)src, \
						 len);		  \
		memcpy(dst, src, len);				  \
		instrument_copy_from_user_after(dst, (void __user *)src, len, \
						0);		  \
	} while (0)
#endif
#endif /* _ASM_GENERIC_CACHEFLUSH_H */