Mirror of https://github.com/torvalds/linux.git
parisc: Optimize flush_kernel_vmap_range and invalidate_kernel_vmap_range
The previously submitted patch did not resolve the random segmentation
faults observed on the phantom buildd system. There are still unresolved
problems with the Debian 4.8 and 4.9 kernels on C8000.

The attached patch removes the flush of the offset map pages and does a
whole data cache flush for large ranges. No other arch flushes the offset
map in these routines as far as I can tell.

I have not observed any random segmentation faults on rp3440 in two weeks
of testing with 4.10.0 and 4.10.1.

Signed-off-by: John David Anglin <dave.anglin@bell.net>
Cc: stable@vger.kernel.org # v4.8+
Signed-off-by: Helge Deller <deller@gmx.de>
commit 316ec0624f
parent 5f655322b1
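For context before the diff, here is a minimal sketch (not part of the patch) of the caller-side contract for the two routines being reworked: flush_kernel_vmap_range() writes dirty cache lines back before a device reads from a vmalloc()/vmap() buffer, and invalidate_kernel_vmap_range() discards cache lines covering a range that a device has written into, before the CPU reads that data. The device helpers (my_dev_read_from, my_dev_write_into, my_dev_wait) and the example function are hypothetical.

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>

/* Hypothetical device helpers, declared only for this sketch. */
extern void my_dev_read_from(void *buf, int len);   /* device reads buf  */
extern void my_dev_write_into(void *buf, int len);  /* device writes buf */
extern void my_dev_wait(void);

int my_dev_io_example(int len)
{
	void *buf = vmalloc(len);

	if (!buf)
		return -ENOMEM;

	/* CPU fills the buffer and the device then reads it: write the
	 * dirty cache lines back so the device sees the CPU's data. */
	memset(buf, 0x5a, len);
	flush_kernel_vmap_range(buf, len);
	my_dev_read_from(buf, len);
	my_dev_wait();

	/* The device writes into the buffer and the CPU then reads it:
	 * drop the now-stale cache lines covering the range first. */
	my_dev_write_into(buf, len);
	my_dev_wait();
	invalidate_kernel_vmap_range(buf, len);

	vfree(buf);
	return 0;
}

The patch below changes only how parisc implements these two calls; when callers invoke them is unchanged.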
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -43,28 +43,9 @@ static inline void flush_kernel_dcache_page(struct page *page)
 
 #define flush_kernel_dcache_range(start,size) \
 	flush_kernel_dcache_range_asm((start), (start)+(size));
-/* vmap range flushes and invalidates. Architecturally, we don't need
- * the invalidate, because the CPU should refuse to speculate once an
- * area has been flushed, so invalidate is left empty */
-static inline void flush_kernel_vmap_range(void *vaddr, int size)
-{
-	unsigned long start = (unsigned long)vaddr;
-
-	flush_kernel_dcache_range_asm(start, start + size);
-}
-static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
-{
-	unsigned long start = (unsigned long)vaddr;
-	void *cursor = vaddr;
-
-	for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
-		struct page *page = vmalloc_to_page(cursor);
-
-		if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-			flush_kernel_dcache_page(page);
-	}
-	flush_kernel_dcache_range_asm(start, start + size);
-}
+
+void flush_kernel_vmap_range(void *vaddr, int size);
+void invalidate_kernel_vmap_range(void *vaddr, int size);
 
 #define flush_cache_vmap(start, end)		flush_cache_all()
 #define flush_cache_vunmap(start, end)		flush_cache_all()
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -616,3 +616,25 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
 	}
 }
+
+void flush_kernel_vmap_range(void *vaddr, int size)
+{
+	unsigned long start = (unsigned long)vaddr;
+
+	if ((unsigned long)size > parisc_cache_flush_threshold)
+		flush_data_cache();
+	else
+		flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(flush_kernel_vmap_range);
+
+void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+	unsigned long start = (unsigned long)vaddr;
+
+	if ((unsigned long)size > parisc_cache_flush_threshold)
+		flush_data_cache();
+	else
+		flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(invalidate_kernel_vmap_range);
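A note on the new implementation (my reading, not part of the patch): both routines now share the same body. With the per-page flush of each page's kernel offset mapping gone (the vmalloc_to_page()/flush_kernel_dcache_page() loop removed above), "invalidate" on parisc reduces to the same data-cache flush of the range as "flush". The parisc_cache_flush_threshold test presumably marks the point at which flushing the entire data cache with flush_data_cache() is cheaper than walking a large range with flush_kernel_dcache_range_asm(), which is the "whole data cache flush for large ranges" the commit message describes.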