/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm-generic/export.h>

#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

	.macro	save_and_disable_daif, flags
	mrs	\flags, daif
	msr	daifset, #0xf
	.endm

	.macro	disable_daif
	msr	daifset, #0xf
	.endm

	.macro	enable_daif
	msr	daifclr, #0xf
	.endm

	.macro	restore_daif, flags:req
	msr	daif, \flags
	.endm
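
/*
 * Usage sketch (illustrative): a non-reentrant sequence is typically
 * bracketed with the macros above, using any free general-purpose register
 * to hold the saved flags:
 *
 *	save_and_disable_daif x23	// x23 := old DAIF, all exceptions masked
 *	...				// critical sequence
 *	restore_daif x23		// put DAIF back as it was
 */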

	/* Only on aarch64 pstate, PSR_D_BIT is different for aarch32 */
	.macro	inherit_daif, pstate:req, tmp:req
	and	\tmp, \pstate, #(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
	msr	daif, \tmp
	.endm

	/* IRQ is the lowest priority flag, unconditionally unmask the rest. */
	.macro	enable_da_f
	msr	daifclr, #(8 | 4 | 1)
	.endm

/*
 * Save/restore interrupts.
 */
	.macro	save_and_disable_irq, flags
	mrs	\flags, daif
	msr	daifset, #2
	.endm

	.macro	restore_irq, flags
	msr	daif, \flags
	.endm

	.macro	enable_dbg
	msr	daifclr, #8
	.endm

	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
	isb	// Synchronise with enable_dbg
9990:
	.endm

	/* call with daif masked */
	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
9990:
	.endm
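
/*
 * Usage sketch (illustrative; assumes the caller has already loaded the
 * task's thread flags into a register):
 *
 *	disable_step_tsk x1, x2		// clear MDSCR_EL1.SS if TIF_SINGLESTEP
 *	...				// run without single-step
 *	enable_step_tsk x1, x2		// restore single-step for the task
 */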

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb, opt
	dmb	\opt
	.endm

/*
 * RAS Error Synchronization barrier
 */
	.macro	esb
	hint	#16
	.endm

/*
 * Value prediction barrier
 */
	.macro	csdb
	hint	#20
	.endm

/*
 * Speculation barrier
 */
	.macro	sb
alternative_if_not ARM64_HAS_SB
	dsb	nsh
	isb
alternative_else
	SB_BARRIER_INSN
	nop
alternative_endif
	.endm

/*
 * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
 * of bounds.
 */
	.macro	mask_nospec64, idx, limit, tmp
	sub	\tmp, \idx, \limit
	bic	\tmp, \tmp, \idx
	and	\idx, \idx, \tmp, asr #63
	csdb
	.endm
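
/*
 * Worked example of the arithmetic above (a sketch, not extra code): with
 * \idx = 3 and \limit = 8, \tmp = 3 - 8 is negative, so bit 63 of \tmp is
 * set, "\tmp asr #63" is all ones and \idx survives the final AND. With
 * \idx = 9 and \limit = 8 the subtraction is non-negative, the shift yields
 * zero and \idx is forced to zero before it can be used speculatively. The
 * BIC also clears bit 63 of \tmp when \idx itself has bit 63 set, so a wild
 * "negative" index is zeroed as well.
 */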

/*
 * NOP sequence
 */
	.macro	nops, num
	.rept	\num
	nop
	.endr
	.endm

/*
 * Emit an entry into the exception table
 */
	.macro		_asm_extable, from, to
	.pushsection	__ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
	.endm

#define USER(l, x...)				\
9999:	x;					\
	_asm_extable	9999b, l
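
/*
 * Usage sketch (illustrative only): wrap a single user-space access so that
 * a fault branches to a local fixup label instead of oopsing, e.g.
 *
 *	USER(9998f, ldtr x1, [x0])	// on fault, continue at label 9998
 *
 * The macro records the faulting instruction and the fixup target in the
 * __ex_table section via _asm_extable.
 */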

/*
 * Register aliases.
 */
lr	.req	x30		// link register

/*
 * Vector entry
 */
	.macro	ventry	label
	.align	7
	b	\label
	.endm

/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif

/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm
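
/*
 * Usage sketch (illustrative): combining two registers that each hold a
 * zero-extended 32-bit half of a 64-bit value:
 *
 *	regs_to_64 x0, x1, x2		// LE: x0 = (x2 << 32) | x1
 *
 * On a big endian kernel the same invocation treats x1 as the high half,
 * which is why the parameter order flips with CONFIG_CPU_BIG_ENDIAN.
 */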

/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC.
 */
	/*
	 * @dst: destination register (64 bit wide)
	 * @sym: name of the symbol
	 */
	.macro	adr_l, dst, sym
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
	.endm

	/*
	 * @dst: destination register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
	 *       32-bit wide register, in which case it cannot be used to hold
	 *       the address
	 */
	.macro	ldr_l, dst, sym, tmp=
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
	.endm

	/*
	 * @src: source register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: mandatory 64-bit scratch register to calculate the address
	 *       while <src> needs to be preserved.
	 */
	.macro	str_l, src, sym, tmp
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
	.endm
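
/*
 * Usage sketch (illustrative; 'some_var' stands for any suitably visible
 * symbol and is not defined here):
 *
 *	adr_l	x0, some_var		// x0 = runtime address of some_var
 *	ldr_l	w1, some_var, x2	// w1 = value of some_var, x2 scratch
 *	str_l	w1, some_var, x2	// store w1 back, x2 scratch
 *
 * Each expands to an adrp plus a :lo12: fixup, so the result is position
 * independent and reaches +/- 4 GB without a literal pool.
 */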

	/*
	 * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro adr_this_cpu, dst, sym, tmp
	adrp	\tmp, \sym
	add	\dst, \tmp, #:lo12:\sym
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	add	\dst, \dst, \tmp
	.endm

	/*
	 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro ldr_this_cpu dst, sym, tmp
	adr_l	\dst, \sym
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	ldr	\dst, [\dst, \tmp]
	.endm
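
/*
 * Usage sketch (illustrative; 'irq_stack_ptr' is just an example name for a
 * per-cpu variable):
 *
 *	adr_this_cpu x0, irq_stack_ptr, x1	// x0 = address of this CPU's copy
 *	ldr_this_cpu x0, irq_stack_ptr, x1	// x0 = this CPU's value
 *
 * Both derive the per-cpu offset from TPIDR_EL1, or TPIDR_EL2 when running
 * at EL2 with VHE, which is what the alternative above patches at boot.
 */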

/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm

/*
 * mmid - get context id from mm pointer (mm->context.id)
 */
	.macro	mmid, rd, rn
	ldr	\rd, [\rn, #MM_CONTEXT_ID]
	.endm

/*
 * read_ctr - read CTR_EL0. If the system has mismatched register fields,
 * provide the system wide safe value from arm64_ftr_reg_ctrel0.sys_val
 */
	.macro	read_ctr, reg
alternative_if_not ARM64_MISMATCHED_CACHE_TYPE
	mrs	\reg, ctr_el0			// read CTR
	nop
alternative_else
	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
	.endm

/*
 * raw_dcache_line_size - get the minimum D-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * dcache_line_size - get the safe D-cache line size across all CPUs
 */
	.macro	dcache_line_size, reg, tmp
	read_ctr	\tmp
	ubfm		\tmp, \tmp, #16, #19	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm

/*
 * raw_icache_line_size - get the minimum I-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * icache_line_size - get the safe I-cache line size across all CPUs
 */
	.macro	icache_line_size, reg, tmp
	read_ctr	\tmp
	and		\tmp, \tmp, #0xf	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm

/*
 * tcr_set_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_t0sz, valreg, t0sz
	bfi	\valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
	.endm

/*
 * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
 * ID_AA64MMFR0_EL1.PARange value
 *
 *	tcr:		register with the TCR_ELx value to be updated
 *	pos:		IPS or PS bitfield position
 *	tmp{0,1}:	temporary registers
 */
	.macro	tcr_compute_pa_size, tcr, pos, tmp0, tmp1
	mrs	\tmp0, ID_AA64MMFR0_EL1
	// Narrow PARange to fit the PS field in TCR_ELx
	ubfx	\tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3
	mov	\tmp1, #ID_AA64MMFR0_PARANGE_MAX
	cmp	\tmp0, \tmp1
	csel	\tmp0, \tmp1, \tmp0, hi
	bfi	\tcr, \tmp0, \pos, #3
	.endm

/*
 * Macro to perform a data cache maintenance for the interval
 * [kaddr, kaddr + size)
 *
 *	op:		operation passed to dc instruction
 *	domain:		domain used in dsb instruction
 *	kaddr:		starting virtual address of the region
 *	size:		size of the region
 *	Corrupts:	kaddr, size, tmp1, tmp2
 */
	.macro	__dcache_op_workaround_clean_cache, op, kaddr
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
	dc	\op, \kaddr
alternative_else
	dc	civac, \kaddr
alternative_endif
	.endm

	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
	dcache_line_size \tmp1, \tmp2
	add	\size, \kaddr, \size
	sub	\tmp2, \tmp1, #1
	bic	\kaddr, \kaddr, \tmp2
9998:
	.ifc	\op, cvau
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvac
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvap
	sys	3, c7, c12, 1, \kaddr	// dc cvap
	.else
	.ifc	\op, cvadp
	sys	3, c7, c13, 1, \kaddr	// dc cvadp
	.else
	dc	\op, \kaddr
	.endif
	.endif
	.endif
	.endif
	add	\kaddr, \kaddr, \tmp1
	cmp	\kaddr, \size
	b.lo	9998b
	dsb	\domain
	.endm
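
/*
 * Usage sketch (illustrative): cleaning a buffer to the point of coherency
 * before handing it to a non-coherent agent might look like
 *
 *	mov	x2, #SOME_SIZE		// SOME_SIZE: placeholder constant
 *	dcache_by_line_op cvac, sy, x0, x2, x3, x4
 *
 * where x0 holds the start address; x0 and x2-x4 are clobbered as noted in
 * the "Corrupts" list above.
 */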

/*
 * Macro to perform an instruction cache maintenance for the interval
 * [start, end)
 *
 *	start, end:	virtual addresses describing the region
 *	label:		A label to branch to on user fault.
 *	Corrupts:	tmp1, tmp2
 */
	.macro invalidate_icache_by_line start, end, tmp1, tmp2, label
	icache_line_size \tmp1, \tmp2
	sub	\tmp2, \tmp1, #1
	bic	\tmp2, \start, \tmp2
9997:
USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
	add	\tmp2, \tmp2, \tmp1
	cmp	\tmp2, \end
	b.lo	9997b
	dsb	ish
	isb
	.endm

/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
	.macro	reset_pmuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64dfr0_el1
	sbfx	\tmpreg, \tmpreg, #ID_AA64DFR0_PMUVER_SHIFT, #4
	cmp	\tmpreg, #1			// Skip if no PMU present
	b.lt	9000f
	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
9000:
	.endm

/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:	ldp	\t1, \t2, [\src]
	ldp	\t3, \t4, [\src, #16]
	ldp	\t5, \t6, [\src, #32]
	ldp	\t7, \t8, [\src, #48]
	add	\src, \src, #64
	stnp	\t1, \t2, [\dest]
	stnp	\t3, \t4, [\dest, #16]
	stnp	\t5, \t6, [\dest, #32]
	stnp	\t7, \t8, [\dest, #48]
	add	\dest, \dest, #64
	tst	\src, #(PAGE_SIZE - 1)
	b.ne	9998b
	.endm

/*
 * Annotate a function as position independent, i.e., safe to be called before
 * the kernel virtual mapping is activated.
 */
#define ENDPIPROC(x)			\
	.globl	__pi_##x;		\
	.type	__pi_##x, %function;	\
	.set	__pi_##x, x;		\
	.size	__pi_##x, . - x;	\
	ENDPROC(x)

/*
 * Annotate a function as being unsuitable for kprobes.
 */
#ifdef CONFIG_KPROBES
#define NOKPROBE(x)				\
	.pushsection "_kprobe_blacklist", "aw";	\
	.quad	x;				\
	.popsection;
#else
#define NOKPROBE(x)
#endif

#ifdef CONFIG_KASAN
#define EXPORT_SYMBOL_NOKASAN(name)
#else
#define EXPORT_SYMBOL_NOKASAN(name)	EXPORT_SYMBOL(name)
#endif

	/*
	 * Emit a 64-bit absolute little endian symbol reference in a way that
	 * ensures that it will be resolved at build time, even when building a
	 * PIE binary. This requires cooperation from the linker script, which
	 * must emit the lo32/hi32 halves individually.
	 */
	.macro	le64sym, sym
	.long	\sym\()_lo32
	.long	\sym\()_hi32
	.endm

	/*
	 * mov_q - move an immediate constant into a 64-bit register using
	 *         between 2 and 4 movz/movk instructions (depending on the
	 *         magnitude and sign of the operand)
	 */
	.macro	mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz	\reg, :abs_g1_s:\val
	.else
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz	\reg, :abs_g2_s:\val
	.else
	movz	\reg, :abs_g3:\val
	movk	\reg, :abs_g2_nc:\val
	.endif
	movk	\reg, :abs_g1_nc:\val
	.endif
	movk	\reg, :abs_g0_nc:\val
	.endm
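
	/*
	 * Usage sketch (illustrative):
	 *
	 *	mov_q	x0, 0xffffffff80000000	// sign-extendable: 2 instructions
	 *	mov_q	x1, 0x0123456789abcdef	// full 64-bit value: 4 instructions
	 *
	 * The .if tests above pick the shortest movz/movk sequence that can
	 * represent the operand.
	 */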

/*
 * Return the current task_struct.
 */
	.macro	get_current_task, rd
	mrs	\rd, sp_el0
	.endm

/*
 * Offset ttbr1 to allow for 48-bit kernel VAs set with 52-bit PTRS_PER_PGD.
 * orr is used as it can cover the immediate value (and is idempotent).
 * In future this may be nop'ed out when dealing with 52-bit kernel VAs.
 *	ttbr: Value of ttbr to set, modified.
 */
	.macro	offset_ttbr1, ttbr
#ifdef CONFIG_ARM64_USER_VA_BITS_52
	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
#endif
	.endm

/*
 * Perform the reverse of offset_ttbr1.
 * bic is used as it can cover the immediate value and, in future, won't need
 * to be nop'ed out when dealing with 52-bit kernel VAs.
 */
	.macro	restore_ttbr1, ttbr
#ifdef CONFIG_ARM64_USER_VA_BITS_52
	bic	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
#endif
	.endm

/*
 * Arrange a physical address in a TTBR register, taking care of 52-bit
 * addresses.
 *
 *	phys:	physical address, preserved
 *	ttbr:	returns the TTBR value
 */
	.macro	phys_to_ttbr, ttbr, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	orr	\ttbr, \phys, \phys, lsr #46
	and	\ttbr, \ttbr, #TTBR_BADDR_MASK_52
#else
	mov	\ttbr, \phys
#endif
	.endm

	.macro	phys_to_pte, pte, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	/*
	 * We assume \phys is 64K aligned and this is guaranteed by only
	 * supporting this configuration with 64K pages.
	 */
	orr	\pte, \phys, \phys, lsr #36
	and	\pte, \pte, #PTE_ADDR_MASK
#else
	mov	\pte, \phys
#endif
	.endm

	.macro	pte_to_phys, phys, pte
#ifdef CONFIG_ARM64_PA_BITS_52
	ubfiz	\phys, \pte, #(48 - 16 - 12), #16
	bfxil	\phys, \pte, #16, #32
	lsl	\phys, \phys, #16
#else
	and	\phys, \pte, #PTE_ADDR_MASK
#endif
	.endm

/*
 * tcr_clear_errata_bits - Clear TCR bits that trigger an errata on this CPU.
 */
	.macro	tcr_clear_errata_bits, tcr, tmp1, tmp2
#ifdef CONFIG_FUJITSU_ERRATUM_010001
	mrs	\tmp1, midr_el1

	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001_MASK
	and	\tmp1, \tmp1, \tmp2
	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001
	cmp	\tmp1, \tmp2
	b.ne	10f

	mov_q	\tmp2, TCR_CLEAR_FUJITSU_ERRATUM_010001
	bic	\tcr, \tcr, \tmp2
10:
#endif /* CONFIG_FUJITSU_ERRATUM_010001 */
	.endm

/**
 * Errata workaround prior to disable MMU. Insert an ISB immediately prior
 * to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0.
 */
	.macro	pre_disable_mmu_workaround
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
	isb
#endif
	.endm

	/*
	 * frame_push - Push @regcount callee saved registers to the stack,
	 *              starting at x19, as well as x29/x30, and set x29 to
	 *              the new value of sp. Add @extra bytes of stack space
	 *              for locals.
	 */
	.macro		frame_push, regcount:req, extra
	__frame		st, \regcount, \extra
	.endm

	/*
	 * frame_pop  - Pop the callee saved registers from the stack that were
	 *              pushed in the most recent call to frame_push, as well
	 *              as x29/x30 and any extra stack space that may have been
	 *              allocated.
	 */
	.macro		frame_pop
	__frame		ld
	.endm
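
	/*
	 * Usage sketch (illustrative): an assembly routine that wants x19-x22
	 * preserved plus 16 bytes of scratch on the stack could use
	 *
	 *	frame_push	4, 16		// saves x19-x22 and x29/x30
	 *	...				// body, locals live below x29
	 *	frame_pop			// restores and pops everything
	 *	ret
	 *
	 * The bookkeeping lives in the __frame helpers that follow.
	 */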

	.macro		__frame_regs, reg1, reg2, op, num
	.if		.Lframe_regcount == \num
	\op\()r		\reg1, [sp, #(\num + 1) * 8]
	.elseif		.Lframe_regcount > \num
	\op\()p		\reg1, \reg2, [sp, #(\num + 1) * 8]
	.endif
	.endm

	.macro		__frame, op, regcount, extra=0
	.ifc		\op, st
	.if		(\regcount) < 0 || (\regcount) > 10
	.error		"regcount should be in the range [0 ... 10]"
	.endif
	.if		((\extra) % 16) != 0
	.error		"extra should be a multiple of 16 bytes"
	.endif
	.ifdef		.Lframe_regcount
	.if		.Lframe_regcount != -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	.endif
	.set		.Lframe_regcount, \regcount
	.set		.Lframe_extra, \extra
	.set		.Lframe_local_offset, ((\regcount + 3) / 2) * 16
	stp		x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]!
	mov		x29, sp
	.endif

	__frame_regs	x19, x20, \op, 1
	__frame_regs	x21, x22, \op, 3
	__frame_regs	x23, x24, \op, 5
	__frame_regs	x25, x26, \op, 7
	__frame_regs	x27, x28, \op, 9

	.ifc		\op, ld
	.if		.Lframe_regcount == -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	ldp		x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra
	.set		.Lframe_regcount, -1
	.endif
	.endm

/*
 * Check whether to yield to another runnable task from kernel mode NEON code
 * (which runs with preemption disabled).
 *
 * if_will_cond_yield_neon
 *        // pre-yield patchup code
 * do_cond_yield_neon
 *        // post-yield patchup code
 * endif_yield_neon    <label>
 *
 * where <label> is optional, and marks the point where execution will resume
 * after a yield has been performed. If omitted, execution resumes right after
 * the endif_yield_neon invocation. Note that the entire sequence, including
 * the provided patchup code, will be omitted from the image if CONFIG_PREEMPT
 * is not defined.
 *
 * As a convenience, in the case where no patchup code is required, the above
 * sequence may be abbreviated to
 *
 * cond_yield_neon <label>
 *
 * Note that the patchup code does not support assembler directives that change
 * the output section, any use of such directives is undefined.
 *
 * The yield itself consists of the following:
 * - Check whether the preempt count is exactly 1, in which case disabling
 *   preemption once will make the task preemptible. If this is not the case,
 *   yielding is pointless.
 * - Check whether TIF_NEED_RESCHED is set, and if so, disable and re-enable
 *   kernel mode NEON (which will trigger a reschedule), and branch to the
 *   yield fixup code.
 *
 * This macro sequence may clobber all CPU state that is not guaranteed by the
 * AAPCS to be preserved across an ordinary function call.
 */

	.macro		cond_yield_neon, lbl
	if_will_cond_yield_neon
	do_cond_yield_neon
	endif_yield_neon	\lbl
	.endm
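
	/*
	 * Usage sketch (illustrative): a long-running NEON loop that spills
	 * its live state around a potential reschedule. The buffer in x19,
	 * the choice of q0/q1 and the .Lloop label are hypothetical, chosen
	 * to match whatever state the caller actually keeps live:
	 *
	 *	if_will_cond_yield_neon
	 *	stp	q0, q1, [x19]		// pre-yield: stash live regs
	 *	do_cond_yield_neon
	 *	ldp	q0, q1, [x19]		// post-yield: reload them
	 *	endif_yield_neon .Lloop		// resume at the loop head
	 */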

	.macro		if_will_cond_yield_neon
#ifdef CONFIG_PREEMPT
	get_current_task	x0
	ldr		x0, [x0, #TSK_TI_PREEMPT]
	sub		x0, x0, #PREEMPT_DISABLE_OFFSET
	cbz		x0, .Lyield_\@
	/* fall through to endif_yield_neon */
	.subsection	1
.Lyield_\@ :
#else
	.section	".discard.cond_yield_neon", "ax"
#endif
	.endm

	.macro		do_cond_yield_neon
	bl		kernel_neon_end
	bl		kernel_neon_begin
	.endm

	.macro		endif_yield_neon, lbl
	.ifnb		\lbl
	b		\lbl
	.else
	b		.Lyield_out_\@
	.endif
	.previous
.Lyield_out_\@ :
	.endm

#endif	/* __ASM_ASSEMBLER_H */