asm-generic: fixes for v5.5

Here are two bugfixes from Mike Rapoport, both fixing
 compile-time errors for the nds32 architecture that
 were recently introduced.
 
 Signed-off-by: Arnd Bergmann <arnd@arndb.de>
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2
 
 iQIcBAABCAAGBQJeHehEAAoJEGCrR//JCVIncBIQAKmOG0xrS7YnUiyi0MqddSaj
 wszJg5Y1edyMhrrYXMkMz3G0AJ3esLAXpwxk/d3thPjKwgY5TaWGNgcpYKiNG0Mm
 ehSaeJHdXlXiAZIfjxM2POuJZ1na98CQJwBN+8jpWyXmlvzTAm3YJZyE65gZBAmP
 VYZtaLCD1lAs10Pp8101r7Z0gQ+Fhv06Unf144eJ0ZF33sG5/xxJW57LCv/Yfy00
 XeiSbN7xJ/2gPuz8JcsaJUF8VwJzf1UhNganFUUSoaQ9n03saytbuS2UhhWDoc5z
 S9felivdKqo6l8ASc21ymW8WeSMwCO+mAOPuWg3T1K51Q/hasZ/CmGPBceHi0Pf9
 zXtAigBc/GGANhu9w7kKl1IdKe5nTugk7EL+wm/4HAR8Z8JmdZT5SkE54SjB3maw
 E1tvIMF8TS/KjXHJCLhyksB/kmV3+TKqwQl0y+ZI4hmdVECE301PaTO3xNhb4oS4
 dOnEMPsIPQutqmKOyoN+S6T0IYaBL0XJ+5UXeFVRC2LGUWfGx096jScMYqCwadDy
 MA49oK8xtKhXxdPa2E7yEzQIKx91EHCpavIy5Pg2WF1nvejGfH/amFgpvYaaJ1N1
 uKYgavDJb6TeIylYfnaVawkB/WwM5uwWBANByPen86V9XqGhED3x10ceyvwNT6WF
 jD27rkrHRT9VN/x38VOb
 =ZMNN
 -----END PGP SIGNATURE-----

Merge tag 'asm-generic-5.5' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/playground

Pull asm-generic fixes from Arnd Bergmann:
 "Here are two bugfixes from Mike Rapoport, both fixing compile-time
  errors for the nds32 architecture that were recently introduced"

* tag 'asm-generic-5.5' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/playground:
  nds32: fix build failure caused by page table folding updates
  asm-generic/nds32: don't redefine cacheflush primitives
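For context, both fixes lean on the override convention of <asm-generic/cacheflush.h>: every generic stub is wrapped in #ifndef, and an architecture claims a primitive by defining the macro to its own name before including the generic header. A minimal sketch of the convention, condensed from the diffs below:

    /* arch header, before the generic include */
    void flush_icache_range(unsigned long start, unsigned long end);
    #define flush_icache_range flush_icache_range  /* mark it as provided */

    #include <asm-generic/cacheflush.h>  /* generic stub is skipped now */

    /* include/asm-generic/cacheflush.h */
    #ifndef flush_icache_range
    static inline void flush_icache_range(unsigned long start, unsigned long end)
    {
    }
    #endif

The #define has no runtime cost; it only makes the name visible to the preprocessor so the generic #ifndef guard skips its stub.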
Merged by Linus Torvalds, 2020-01-14 10:17:15 -08:00
commit 67373994d2
3 changed files with 40 additions and 6 deletions

--- a/arch/nds32/include/asm/cacheflush.h
+++ b/arch/nds32/include/asm/cacheflush.h
@@ -9,7 +9,11 @@
 #define PG_dcache_dirty PG_arch_1
 
 void flush_icache_range(unsigned long start, unsigned long end);
+#define flush_icache_range flush_icache_range
+
 void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+#define flush_icache_page flush_icache_page
+
 #ifdef CONFIG_CPU_CACHE_ALIASING
 void flush_cache_mm(struct mm_struct *mm);
 void flush_cache_dup_mm(struct mm_struct *mm);
@@ -40,12 +44,11 @@ void invalidate_kernel_vmap_range(void *addr, int size);
 #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages)
 
 #else
-#include <asm-generic/cacheflush.h>
-#undef flush_icache_range
-#undef flush_icache_page
-#undef flush_icache_user_range
 void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                              unsigned long addr, int len);
+#define flush_icache_user_range flush_icache_user_range
+
+#include <asm-generic/cacheflush.h>
 #endif
 
 #endif /* __NDS32_CACHEFLUSH_H__ */
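A note on the reordering above: the old sequence included the generic header first and then #undef'ed the generic names. That worked while the generic primitives were macros, but once they became static inline functions there was nothing for #undef to remove, and nds32's own declaration then clashed with the inline definition. A simplified sketch of the failure mode (not the exact compiler diagnostics):

    /* asm-generic/cacheflush.h now supplies a function, not a macro */
    static inline void flush_icache_range(unsigned long start, unsigned long end)
    {
    }

    /* old nds32 pattern, after the include: */
    #undef flush_icache_range  /* no-op: flush_icache_range is not a macro */
    void flush_icache_range(unsigned long start, unsigned long end);
    /* error: non-static declaration follows static declaration */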

--- a/arch/nds32/include/asm/pgtable.h
+++ b/arch/nds32/include/asm/pgtable.h
@@ -195,7 +195,7 @@ extern void paging_init(void);
 #define pte_unmap(pte) do { } while (0)
 #define pte_unmap_nested(pte) do { } while (0)
 
-#define pmd_off_k(address) pmd_offset(pgd_offset_k(address), address)
+#define pmd_off_k(address) pmd_offset(pud_offset(p4d_offset(pgd_offset_k(address), (address)), (address)), (address))
 
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 /*
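The pmd_off_k() change is mechanical fallout from the page table folding updates: pmd_offset() now takes a pud_t * rather than a pgd_t *, so the kernel-address walk has to spell out every level. On a configuration with p4d and pud folded away, the extra steps are compile-time pass-throughs, roughly like this simplified sketch (not the exact folded helpers):

    /* folded levels reinterpret the same entry; no extra memory access */
    static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
    {
        return (p4d_t *)pgd;  /* p4d folded into pgd */
    }

    static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
    {
        return (pud_t *)p4d;  /* pud folded into p4d */
    }

so the expanded macro still resolves to the same pgd entry on nds32's two-level layout.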

--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -11,71 +11,102 @@
  * The cache doesn't need to be flushed when TLB entries change when
  * the cache is mapped to physical memory, not virtual memory
  */
+#ifndef flush_cache_all
 static inline void flush_cache_all(void)
 {
 }
+#endif
 
+#ifndef flush_cache_mm
 static inline void flush_cache_mm(struct mm_struct *mm)
 {
 }
+#endif
 
+#ifndef flush_cache_dup_mm
 static inline void flush_cache_dup_mm(struct mm_struct *mm)
 {
 }
+#endif
 
+#ifndef flush_cache_range
 static inline void flush_cache_range(struct vm_area_struct *vma,
                                      unsigned long start,
                                      unsigned long end)
 {
 }
+#endif
 
+#ifndef flush_cache_page
 static inline void flush_cache_page(struct vm_area_struct *vma,
                                     unsigned long vmaddr,
                                     unsigned long pfn)
 {
 }
+#endif
 
+#ifndef flush_dcache_page
 static inline void flush_dcache_page(struct page *page)
 {
 }
+#endif
 
+#ifndef flush_dcache_mmap_lock
 static inline void flush_dcache_mmap_lock(struct address_space *mapping)
 {
 }
+#endif
 
+#ifndef flush_dcache_mmap_unlock
 static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
 {
 }
+#endif
 
+#ifndef flush_icache_range
 static inline void flush_icache_range(unsigned long start, unsigned long end)
 {
 }
+#endif
 
+#ifndef flush_icache_page
 static inline void flush_icache_page(struct vm_area_struct *vma,
                                      struct page *page)
 {
 }
+#endif
 
+#ifndef flush_icache_user_range
 static inline void flush_icache_user_range(struct vm_area_struct *vma,
                                            struct page *page,
                                            unsigned long addr, int len)
 {
 }
+#endif
 
+#ifndef flush_cache_vmap
 static inline void flush_cache_vmap(unsigned long start, unsigned long end)
 {
 }
+#endif
 
+#ifndef flush_cache_vunmap
 static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 {
 }
+#endif
 
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+#ifndef copy_to_user_page
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 	do { \
 		memcpy(dst, src, len); \
 		flush_icache_user_range(vma, page, vaddr, len); \
 	} while (0)
+#endif
+
+#ifndef copy_from_user_page
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	memcpy(dst, src, len)
+#endif
 
 #endif /* __ASM_CACHEFLUSH_H */
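With each stub individually guarded as above, an architecture overrides exactly the primitives it implements and inherits empty stubs for everything else. A hypothetical override of one of the copy helpers, where my_arch_flush_icache_user() is an invented name used purely for illustration:

    /* hypothetical arch cacheflush.h */
    #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
        do { \
            memcpy(dst, src, len); \
            my_arch_flush_icache_user(vaddr, len); \
        } while (0)

    #include <asm-generic/cacheflush.h>  /* its copy_to_user_page is skipped */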