__flush_icache_range works on kernel addresses, and doesn't need
uaccess. The existing code is a side-effect of its current
implementation, with __flush_cache_user_range fallthrough.

Instead of fallthrough to share the code, use a common macro for the
two where the caller specifies an optional fixup label if user access
is needed. If provided, this label would be used to generate an
extable entry.

Simplify the code to use dcache_by_line_op, instead of replicating
much of its functionality.

No functional change intended. Possible performance impact due to the
reduced number of instructions.

Reported-by: Catalin Marinas <catalin.marinas@arm.com>
Reported-by: Will Deacon <will@kernel.org>
Reported-by: Mark Rutland <mark.rutland@arm.com>
Link: https://lore.kernel.org/linux-arch/20200511110014.lb9PEahJ4hVOYrbwIb_qUHXyNy9KQzNFdb_I3YlzY6A@z/
Link: https://lore.kernel.org/linux-arm-kernel/20210521121846.GB1040@C02TD0UTHF1T.local/
Signed-off-by: Fuad Tabba <tabba@google.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20210524083001.2586635-5-tabba@google.com
Signed-off-by: Will Deacon <will@kernel.org>
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Cache maintenance
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/asm-uaccess.h>

/*
 *	__flush_cache_range(start,end) [fixup]
 *
 *	Ensure that the I and D caches are coherent within the specified
 *	region. This is typically used when code has been written to a
 *	memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *	- fixup   - optional label to branch to on user fault
 */
.macro	__flush_cache_range, fixup
alternative_if ARM64_HAS_CACHE_IDC
	dsb	ishst
	b	.Ldc_skip_\@
alternative_else_nop_endif
	mov	x2, x0
	sub	x3, x1, x0
	dcache_by_line_op cvau, ish, x2, x3, x4, x5, \fixup
.Ldc_skip_\@:
alternative_if ARM64_HAS_CACHE_DIC
	isb
	b	.Lic_skip_\@
alternative_else_nop_endif
	invalidate_icache_by_line x0, x1, x2, x3, \fixup
.Lic_skip_\@:
.endm
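
/*
 * For illustration, the two expansions used below:
 *
 *	__flush_cache_range		// kernel range: no fixup, so no
 *					// extable entries are generated
 *	__flush_cache_range 2f		// user range: each cache-line op
 *					// gets an extable entry that
 *					// branches to label 2 on fault
 *
 * The \@ suffix makes the .Ldc_skip_\@ and .Lic_skip_\@ labels unique
 * per expansion.
 */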

/*
 *	__flush_icache_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within the specified
 *	region. This is typically used when code has been written to a
 *	memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
SYM_FUNC_START(__flush_icache_range)
	__flush_cache_range
	ret
SYM_FUNC_END(__flush_icache_range)
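
/*
 * A minimal C-side usage sketch; the prototype is assumed to match the
 * declaration in <asm/cacheflush.h>:
 *
 *	extern void __flush_icache_range(unsigned long start,
 *					 unsigned long end);
 *
 *	// e.g. after writing instructions to a kernel buffer:
 *	memcpy(dst, insns, len);
 *	__flush_icache_range((unsigned long)dst,
 *			     (unsigned long)dst + len);
 */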

/*
 *	__flush_cache_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within the specified
 *	region. This is typically used when code has been written to a
 *	memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
SYM_FUNC_START(__flush_cache_user_range)
	uaccess_ttbr0_enable x2, x3, x4

	__flush_cache_range 2f
	mov	x0, xzr
1:
	uaccess_ttbr0_disable x1, x2
	ret
2:
	mov	x0, #-EFAULT
	b	1b
SYM_FUNC_END(__flush_cache_user_range)
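
/*
 * A C-side sketch (prototype assumed to match <asm/cacheflush.h>).
 * Unlike __flush_icache_range, this works on user addresses and
 * returns 0 on success or -EFAULT if a user access faults:
 *
 *	extern long __flush_cache_user_range(unsigned long start,
 *					     unsigned long end);
 *
 *	if (__flush_cache_user_range(uaddr, uaddr + len))
 *		return -EFAULT;
 */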

/*
 *	invalidate_icache_range(start,end)
 *
 *	Ensure that the I cache is invalidated within the specified region.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
SYM_FUNC_START(invalidate_icache_range)
alternative_if ARM64_HAS_CACHE_DIC
	mov	x0, xzr
	isb
	ret
alternative_else_nop_endif

	uaccess_ttbr0_enable x2, x3, x4

	invalidate_icache_by_line x0, x1, x2, x3, 2f
	mov	x0, xzr
1:
	uaccess_ttbr0_disable x1, x2
	ret
2:
	mov	x0, #-EFAULT
	b	1b
SYM_FUNC_END(invalidate_icache_range)
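
/*
 * A C-side sketch (prototype assumed to match <asm/cacheflush.h>);
 * returns 0 on success or -EFAULT on a user fault. Note that with
 * ARM64_HAS_CACHE_DIC the early-return path above issues only an ISB:
 *
 *	extern int invalidate_icache_range(unsigned long start,
 *					   unsigned long end);
 */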

/*
 *	__flush_dcache_area(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned and invalidated to the PoC.
 *
 *	- kaddr   - kernel address
 *	- size    - size of the region in bytes
 */
SYM_FUNC_START_PI(__flush_dcache_area)
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
SYM_FUNC_END_PI(__flush_dcache_area)
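
/*
 * A C-side sketch (prototype assumed to match <asm/cacheflush.h>):
 *
 *	extern void __flush_dcache_area(void *addr, size_t len);
 *
 *	// e.g. make a freshly written buffer visible to a non-coherent
 *	// observer at the PoC:
 *	__flush_dcache_area(buf, size);
 */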

/*
 *	__clean_dcache_area_pou(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned to the PoU.
 *
 *	- kaddr   - kernel address
 *	- size    - size of the region in bytes
 */
SYM_FUNC_START(__clean_dcache_area_pou)
alternative_if ARM64_HAS_CACHE_IDC
	dsb	ishst
	ret
alternative_else_nop_endif
	dcache_by_line_op cvau, ish, x0, x1, x2, x3
	ret
SYM_FUNC_END(__clean_dcache_area_pou)
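
/*
 * A C-side sketch (prototype assumed to match <asm/cacheflush.h>).
 * When ARM64_HAS_CACHE_IDC is set, D-cache cleaning to the PoU is not
 * required for I/D coherence, so the path above issues only a DSB:
 *
 *	extern void __clean_dcache_area_pou(void *addr, size_t size);
 */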

/*
 *	__inval_dcache_area(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are invalidated. Any partial lines at the ends of the interval are
 *	also cleaned to the PoC to prevent data loss.
 *
 *	- kaddr   - kernel address
 *	- size    - size of the region in bytes
 */
SYM_FUNC_START_LOCAL(__dma_inv_area)
SYM_FUNC_START_PI(__inval_dcache_area)
	/* FALLTHROUGH */

/*
 *	__dma_inv_area(start, size)
 *	- start   - virtual start address of region
 *	- size    - size of the region in bytes
 */
	add	x1, x1, x0
	dcache_line_size x2, x3
	sub	x3, x2, #1
	tst	x1, x3				// end cache line aligned?
	bic	x1, x1, x3
	b.eq	1f
	dc	civac, x1			// clean & invalidate D / U line
1:	tst	x0, x3				// start cache line aligned?
	bic	x0, x0, x3
	b.eq	2f
	dc	civac, x0			// clean & invalidate D / U line
	b	3f
2:	dc	ivac, x0			// invalidate D / U line
3:	add	x0, x0, x2
	cmp	x0, x1
	b.lo	2b
	dsb	sy
	ret
SYM_FUNC_END_PI(__inval_dcache_area)
SYM_FUNC_END(__dma_inv_area)
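
/*
 * A worked example of the edge handling above, assuming a
 * (hypothetical) 64-byte minimum D-cache line:
 *
 *	__inval_dcache_area(0x1010, 0xf0)	// covers [0x1010, 0x1100)
 *
 *	end 0x1100 is line-aligned	-> no trailing "dc civac"
 *	start 0x1010 is mid-line	-> "dc civac" on line 0x1000, so
 *					   the unrelated bytes at
 *					   0x1000-0x100f are cleaned back
 *					   rather than lost
 *	lines 0x1040, 0x1080, 0x10c0	-> "dc ivac" (invalidate only)
 */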

/*
 *	__clean_dcache_area_poc(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned to the PoC.
 *
 *	- kaddr   - kernel address
 *	- size    - size of the region in bytes
 */
SYM_FUNC_START_LOCAL(__dma_clean_area)
SYM_FUNC_START_PI(__clean_dcache_area_poc)
	/* FALLTHROUGH */

/*
 *	__dma_clean_area(start, size)
 *	- start   - virtual start address of region
 *	- size    - size of the region in bytes
 */
	dcache_by_line_op cvac, sy, x0, x1, x2, x3
	ret
SYM_FUNC_END_PI(__clean_dcache_area_poc)
SYM_FUNC_END(__dma_clean_area)
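
/*
 * A C-side sketch (prototype assumed to match <asm/cacheflush.h>):
 *
 *	extern void __clean_dcache_area_poc(void *addr, size_t size);
 *
 * Unlike __inval_dcache_area, cleaning is non-destructive, so partial
 * lines at the ends of the interval need no special casing.
 */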

/*
 *	__clean_dcache_area_pop(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned to the PoP.
 *
 *	- kaddr   - kernel address
 *	- size    - size of the region in bytes
 */
SYM_FUNC_START_PI(__clean_dcache_area_pop)
alternative_if_not ARM64_HAS_DCPOP
	b	__clean_dcache_area_poc
alternative_else_nop_endif
	dcache_by_line_op cvap, sy, x0, x1, x2, x3
	ret
SYM_FUNC_END_PI(__clean_dcache_area_pop)
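
/*
 * Without ARM64_HAS_DCPOP there is no "dc cvap"; cleaning to the PoC
 * also reaches the PoP, so the fallback to __clean_dcache_area_poc
 * above is safe (if conservative).
 */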

/*
 *	__dma_flush_area(start, size)
 *
 *	Ensure that the D-cache lines for the interval [start, start+size)
 *	are cleaned and invalidated to the PoC.
 *
 *	- start   - virtual start address of region
 *	- size    - size of the region in bytes
 */
SYM_FUNC_START_PI(__dma_flush_area)
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
SYM_FUNC_END_PI(__dma_flush_area)

/*
 *	__dma_map_area(start, size, dir)
 *	- start   - kernel virtual start address
 *	- size    - size of region
 *	- dir     - DMA direction
 */
SYM_FUNC_START_PI(__dma_map_area)
	cmp	w2, #DMA_FROM_DEVICE
	b.eq	__dma_inv_area
	b	__dma_clean_area
SYM_FUNC_END_PI(__dma_map_area)
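
/*
 * Direction handling above, for reference:
 *
 *	DMA_FROM_DEVICE		-> __dma_inv_area   (device will write;
 *				   discard stale CPU lines)
 *	DMA_TO_DEVICE,
 *	DMA_BIDIRECTIONAL	-> __dma_clean_area (make CPU writes
 *				   visible to the device)
 */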

/*
 *	__dma_unmap_area(start, size, dir)
 *	- start   - kernel virtual start address
 *	- size    - size of region
 *	- dir     - DMA direction
 */
SYM_FUNC_START_PI(__dma_unmap_area)
	cmp	w2, #DMA_TO_DEVICE
	b.ne	__dma_inv_area
	ret
SYM_FUNC_END_PI(__dma_unmap_area)
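
/*
 * Direction handling above, for reference:
 *
 *	DMA_TO_DEVICE		-> nothing to do; the device only read
 *	DMA_FROM_DEVICE,
 *	DMA_BIDIRECTIONAL	-> __dma_inv_area (discard any lines the
 *				   CPU speculatively fetched while the
 *				   device was writing)
 */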