/*
 * Based on arch/arm/kernel/asm-offsets.c
 *
 * Copyright (C) 1995-2003 Russell King
 *               2001-2002 Keith Owens
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kvm_host.h>
#include <linux/suspend.h>
#include <asm/cpufeature.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/vdso_datapage.h>
#include <linux/kbuild.h>
#include <linux/arm-smccc.h>

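/*
 * Nothing below is linked into the kernel image. Kbuild compiles this
 * file to assembly and scrapes the markers emitted by DEFINE() (see
 * <linux/kbuild.h>) into include/generated/asm-offsets.h, giving
 * assembly code symbolic names for C struct offsets, e.g.:
 *
 *	#define TSK_TI_FLAGS 0	(offsetof(struct task_struct, thread_info.flags))
 *
 * which entry code can then use directly, as in
 * "ldr x1, [tsk, #TSK_TI_FLAGS]" (illustrative sketch, not a quote
 * from entry.S).
 */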
int main(void)
{
  DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
  BLANK();
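  /*
   * thread_info lives inside task_struct (THREAD_INFO_IN_TASK), so the
   * 'tsk' register in entry.S points at a task_struct and the TSK_TI_*
   * offsets below are relative to the task, not the stack.
   */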
  DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
  DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count));
  DEFINE(TSK_TI_ADDR_LIMIT, offsetof(struct task_struct, thread_info.addr_limit));
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
  DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0));
#endif
  DEFINE(TSK_STACK, offsetof(struct task_struct, stack));
  BLANK();
  DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context));
  BLANK();
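  /*
   * pt_regs offsets for the exception entry/exit code. The entry
   * assembly moves general registers in pairs (stp/ldp), which is
   * presumably why only every second x-register offset is defined
   * from x8 upwards.
   */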
  DEFINE(S_X0, offsetof(struct pt_regs, regs[0]));
  DEFINE(S_X1, offsetof(struct pt_regs, regs[1]));
  DEFINE(S_X2, offsetof(struct pt_regs, regs[2]));
  DEFINE(S_X3, offsetof(struct pt_regs, regs[3]));
  DEFINE(S_X4, offsetof(struct pt_regs, regs[4]));
  DEFINE(S_X5, offsetof(struct pt_regs, regs[5]));
  DEFINE(S_X6, offsetof(struct pt_regs, regs[6]));
  DEFINE(S_X7, offsetof(struct pt_regs, regs[7]));
  DEFINE(S_X8, offsetof(struct pt_regs, regs[8]));
  DEFINE(S_X10, offsetof(struct pt_regs, regs[10]));
  DEFINE(S_X12, offsetof(struct pt_regs, regs[12]));
  DEFINE(S_X14, offsetof(struct pt_regs, regs[14]));
  DEFINE(S_X16, offsetof(struct pt_regs, regs[16]));
  DEFINE(S_X18, offsetof(struct pt_regs, regs[18]));
  DEFINE(S_X20, offsetof(struct pt_regs, regs[20]));
  DEFINE(S_X22, offsetof(struct pt_regs, regs[22]));
  DEFINE(S_X24, offsetof(struct pt_regs, regs[24]));
  DEFINE(S_X26, offsetof(struct pt_regs, regs[26]));
  DEFINE(S_X28, offsetof(struct pt_regs, regs[28]));
  DEFINE(S_LR, offsetof(struct pt_regs, regs[30]));
  DEFINE(S_SP, offsetof(struct pt_regs, sp));
#ifdef CONFIG_COMPAT
  DEFINE(S_COMPAT_SP, offsetof(struct pt_regs, compat_sp));
#endif
  DEFINE(S_PSTATE, offsetof(struct pt_regs, pstate));
  DEFINE(S_PC, offsetof(struct pt_regs, pc));
  DEFINE(S_ORIG_X0, offsetof(struct pt_regs, orig_x0));
  DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno));
  DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit));
  DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
  BLANK();
  DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter));
  BLANK();
  DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
  DEFINE(VMA_VM_FLAGS, offsetof(struct vm_area_struct, vm_flags));
  BLANK();
  DEFINE(VM_EXEC, VM_EXEC);
  BLANK();
  DEFINE(PAGE_SZ, PAGE_SIZE);
  BLANK();
  DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
  DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
  DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
  BLANK();
  DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
  DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
  DEFINE(CLOCK_MONOTONIC_RAW, CLOCK_MONOTONIC_RAW);
  DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
  DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
  DEFINE(CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE);
  DEFINE(CLOCK_COARSE_RES, LOW_RES_NSEC);
  DEFINE(NSEC_PER_SEC, NSEC_PER_SEC);
  BLANK();
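  /*
   * Offsets into the vDSO data page. raw_time_* and cs_raw_mult back
   * CLOCK_MONOTONIC_RAW in the vDSO clock_gettime(); like the other
   * non-coarse clocks this only works with the "arch_sys_counter"
   * clocksource, otherwise use_syscall forces the fallback syscall.
   */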
  DEFINE(VDSO_CS_CYCLE_LAST, offsetof(struct vdso_data, cs_cycle_last));
  DEFINE(VDSO_RAW_TIME_SEC, offsetof(struct vdso_data, raw_time_sec));
  DEFINE(VDSO_RAW_TIME_NSEC, offsetof(struct vdso_data, raw_time_nsec));
  DEFINE(VDSO_XTIME_CLK_SEC, offsetof(struct vdso_data, xtime_clock_sec));
  DEFINE(VDSO_XTIME_CLK_NSEC, offsetof(struct vdso_data, xtime_clock_nsec));
  DEFINE(VDSO_XTIME_CRS_SEC, offsetof(struct vdso_data, xtime_coarse_sec));
  DEFINE(VDSO_XTIME_CRS_NSEC, offsetof(struct vdso_data, xtime_coarse_nsec));
  DEFINE(VDSO_WTM_CLK_SEC, offsetof(struct vdso_data, wtm_clock_sec));
  DEFINE(VDSO_WTM_CLK_NSEC, offsetof(struct vdso_data, wtm_clock_nsec));
  DEFINE(VDSO_TB_SEQ_COUNT, offsetof(struct vdso_data, tb_seq_count));
  DEFINE(VDSO_CS_MONO_MULT, offsetof(struct vdso_data, cs_mono_mult));
  DEFINE(VDSO_CS_RAW_MULT, offsetof(struct vdso_data, cs_raw_mult));
  DEFINE(VDSO_CS_SHIFT, offsetof(struct vdso_data, cs_shift));
  DEFINE(VDSO_TZ_MINWEST, offsetof(struct vdso_data, tz_minuteswest));
  DEFINE(VDSO_TZ_DSTTIME, offsetof(struct vdso_data, tz_dsttime));
  DEFINE(VDSO_USE_SYSCALL, offsetof(struct vdso_data, use_syscall));
  BLANK();
  DEFINE(TVAL_TV_SEC, offsetof(struct timeval, tv_sec));
  DEFINE(TVAL_TV_USEC, offsetof(struct timeval, tv_usec));
  DEFINE(TSPEC_TV_SEC, offsetof(struct timespec, tv_sec));
  DEFINE(TSPEC_TV_NSEC, offsetof(struct timespec, tv_nsec));
  BLANK();
  DEFINE(TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
  DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
  BLANK();
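  /* Secondary entry stashes the boot stack and task pointer separately. */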
  DEFINE(CPU_BOOT_STACK, offsetof(struct secondary_data, stack));
  DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task));
  BLANK();
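  /* Guest/host CPU context offsets used by the KVM world-switch code. */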
#ifdef CONFIG_KVM_ARM_HOST
  DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
  DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
  DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs));
  DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs));
  DEFINE(VCPU_FPEXC32_EL2, offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2]));
  DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
#endif
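  /*
   * cpu_suspend() saves the CPU context in a cpu_suspend_ctx on the
   * stack and flushes it to DRAM so it can be found with the MMU off;
   * on warm boot, cpu_resume makes the resume path behave as a return
   * from cpu_suspend().
   */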
#ifdef CONFIG_CPU_PM
  DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx));
  DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp));
  DEFINE(MPIDR_HASH_MASK, offsetof(struct mpidr_hash, mask));
  DEFINE(MPIDR_HASH_SHIFTS, offsetof(struct mpidr_hash, shift_aff));
  DEFINE(SLEEP_STACK_DATA_SYSTEM_REGS, offsetof(struct sleep_stack_data, system_regs));
  DEFINE(SLEEP_STACK_DATA_CALLEE_REGS, offsetof(struct sleep_stack_data, callee_saved_regs));
#endif
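  /* Offsets used by the SMCCC call stubs to store the returned x0-x3. */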
  DEFINE(ARM_SMCCC_RES_X0_OFFS, offsetof(struct arm_smccc_res, a0));
  DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2));
  DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id));
  DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state));
  BLANK();
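  /* struct pbe offsets, for walking the hibernate restore copy list. */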
  DEFINE(HIBERN_PBE_ORIG, offsetof(struct pbe, orig_address));
  DEFINE(HIBERN_PBE_ADDR, offsetof(struct pbe, address));
  DEFINE(HIBERN_PBE_NEXT, offsetof(struct pbe, next));
  DEFINE(ARM64_FTR_SYSVAL, offsetof(struct arm64_ftr_reg, sys_val));
  return 0;
}