arm64: tlbi: Set MAX_TLBI_OPS to PTRS_PER_PTE
In order to reduce the possibility of soft lock-ups, we bound the maximum
number of TLBI operations performed by a single call to flush_tlb_range()
to an arbitrary constant of 1024.

Whilst this does the job of avoiding lock-ups, we can actually be a bit
smarter by defining this as PTRS_PER_PTE. Due to the structure of our page
tables, using PTRS_PER_PTE means that an outer loop calling
flush_tlb_range() for entire table entries will end up performing just a
single TLBI operation for each entry. As an example, mremap()ing a 1GB
range mapped using 4k pages now requires only 512 TLBI operations when
moving the page tables as opposed to 262144 operations (512*512) when
using the current threshold of 1024.

Cc: Joel Fernandes <joel@joelfernandes.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent bdb85cd1d2
commit 3d65b6bbc0
@@ -186,7 +186,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
  * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
  * necessarily a performance improvement.
  */
-#define MAX_TLBI_OPS	1024UL
+#define MAX_TLBI_OPS	PTRS_PER_PTE
 
 static inline void __flush_tlb_range(struct vm_area_struct *vma,
 				     unsigned long start, unsigned long end,
@@ -195,7 +195,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 	unsigned long asid = ASID(vma->vm_mm);
 	unsigned long addr;
 
-	if ((end - start) > (MAX_TLBI_OPS * stride)) {
+	if ((end - start) >= (MAX_TLBI_OPS * stride)) {
 		flush_tlb_mm(vma->vm_mm);
 		return;
 	}
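To make the arithmetic in the commit message concrete, the following is a
minimal userspace sketch (not kernel code) that models the threshold check
in the diff above and counts TLBIs for the 1GB mremap() example. It assumes
4k pages with PTRS_PER_PTE = 512 and that flush_tlb_mm() costs a single
TLBI; the helper count_tlbi() is hypothetical and exists only for
illustration.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PTRS_PER_PTE	512UL	/* 4k granule: 512 entries per table */

/*
 * TLBIs issued by one flush_tlb_range() call under a given threshold.
 * 'ge' selects the new '>=' comparison introduced by this commit.
 */
static unsigned long count_tlbi(unsigned long bytes, unsigned long stride,
				unsigned long max_ops, int ge)
{
	unsigned long ops = bytes / stride;

	if (ge ? ops >= max_ops : ops > max_ops)
		return 1;	/* modelled flush_tlb_mm(): one TLBI */
	return ops;		/* otherwise: one TLBI per stride */
}

int main(void)
{
	unsigned long table_span = PTRS_PER_PTE * PAGE_SIZE;	/* 2MB per entry */
	unsigned long entries = (1UL << 30) / table_span;	/* 512 for 1GB */

	printf("old threshold: %lu TLBIs\n",
	       entries * count_tlbi(table_span, PAGE_SIZE, 1024UL, 0));
	printf("new threshold: %lu TLBIs\n",
	       entries * count_tlbi(table_span, PAGE_SIZE, PTRS_PER_PTE, 1));
	return 0;
}

This prints 262144 for the old threshold and 512 for the new one: each 2MB
table entry (512 pages) falls below the old limit of 1024 and is flushed
page by page, whereas with PTRS_PER_PTE and the '>=' comparison the same
entry collapses to a single flush_tlb_mm().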