commit 4ca9a23765
Based almost entirely upon a patch by Christopher Alexander Tobias
Schulze.
In commit db64fe0225 ("mm: rewrite vmap layer"), lazy VMAP TLB flushing was
added to the vmalloc layer. This causes problems on sparc64.
Sparc64 has two VMAP mapped regions and they are not contiguous with
each other. First we have the module mapping area, then another
unrelated region, then the vmalloc region.
This "another unrelated region" is where the firmware is mapped.
If the lazy TLB flushing logic in the vmalloc code triggers after
we've had both a module unload and a vfree or similar, it will pass an
address range that goes from somewhere inside the module region to
somewhere inside the vmalloc region, thus covering the openfirmware
area entirely.
The sparc64 kernel learns about openfirmware's dynamic mappings in
this region early in the boot, and then services TLB misses in this
area. But openfirmware has some locked TLB entries which are not
mentioned in those dynamic mappings and we should thus not disturb
them.
These huge lazy TLB flush ranges cause those openfirmware locked TLB
entries to be removed, resulting in all kinds of problems including
hard hangs and crashes during reboot/reset.
Besides causing problems like this, such huge TLB flush ranges are
also incredibly inefficient. A plea has been made to the author of
the VMAP lazy TLB flushing code, but for now we'll put a safety guard
into our flush_tlb_kernel_range() implementation.
Since the implementation has become non-trivial, stop defining it as a
macro and instead make it a function in a C source file.
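A minimal sketch of such a guard, assuming the firmware window is bounded by
LOW_OBP_ADDRESS and HI_OBP_ADDRESS and that a helper named
do_flush_tlb_kernel_range() performs the actual flush (the helper name is an
assumption for illustration, not necessarily the committed code):

/* Sketch only: skip the openfirmware window when a lazy-vmap flush
 * range happens to span it.  do_flush_tlb_kernel_range() stands in
 * for whatever actually flushes a kernel virtual address range. */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
		/* Range overlaps the firmware window: flush the parts
		 * below and above it, but leave the window alone. */
		if (start < LOW_OBP_ADDRESS)
			do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
		if (end > HI_OBP_ADDRESS)
			do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end);
	} else {
		do_flush_tlb_kernel_range(start, end);
	}
}

Splitting an overlapping range into the pieces below and above the firmware
window still flushes everything the vmalloc layer asked for, while leaving
openfirmware's locked entries untouched.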
Signed-off-by: David S. Miller <davem@davemloft.net>
arch/sparc/include/asm/tlbflush_64.h (69 lines, 1.6 KiB, C)
#ifndef _SPARC64_TLBFLUSH_H
#define _SPARC64_TLBFLUSH_H

#include <asm/mmu_context.h>

/* TSB flush operations. */

#define TLB_BATCH_NR	192

struct tlb_batch {
	struct mm_struct *mm;
	unsigned long tlb_nr;
	unsigned long active;
	unsigned long vaddrs[TLB_BATCH_NR];
};

void flush_tsb_kernel_range(unsigned long start, unsigned long end);
void flush_tsb_user(struct tlb_batch *tb);
void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);

/* TLB flush operations. */

static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end);

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

void flush_tlb_pending(void);
void arch_enter_lazy_mmu_mode(void);
void arch_leave_lazy_mmu_mode(void);
#define arch_flush_lazy_mmu_mode()	do {} while (0)

/* Local cpu only. */
void __flush_tlb_all(void);
void __flush_tlb_page(unsigned long context, unsigned long vaddr);
void __flush_tlb_kernel_range(unsigned long start, unsigned long end);

#ifndef CONFIG_SMP

static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
	__flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
}

#else /* CONFIG_SMP */

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);

#define global_flush_tlb_page(mm, vaddr) \
	smp_flush_tlb_page(mm, vaddr)

#endif /* ! CONFIG_SMP */

#endif /* _SPARC64_TLBFLUSH_H */