mirror of
https://github.com/torvalds/linux.git
synced 2024-12-03 17:41:22 +00:00
75df529bec
Steal time initialization requires mapping a memory region which
invokes a memory allocation. Doing this at CPU starting time results
in the following trace when CONFIG_DEBUG_ATOMIC_SLEEP is enabled:
BUG: sleeping function called from invalid context at mm/slab.h:498
in_atomic(): 1, irqs_disabled(): 128, non_block: 0, pid: 0, name: swapper/1
CPU: 1 PID: 0 Comm: swapper/1 Not tainted 5.9.0-rc5+ #1
Call trace:
dump_backtrace+0x0/0x208
show_stack+0x1c/0x28
dump_stack+0xc4/0x11c
___might_sleep+0xf8/0x130
__might_sleep+0x58/0x90
slab_pre_alloc_hook.constprop.101+0xd0/0x118
kmem_cache_alloc_node_trace+0x84/0x270
__get_vm_area_node+0x88/0x210
get_vm_area_caller+0x38/0x40
__ioremap_caller+0x70/0xf8
ioremap_cache+0x78/0xb0
memremap+0x9c/0x1a8
init_stolen_time_cpu+0x54/0xf0
cpuhp_invoke_callback+0xa8/0x720
notify_cpu_starting+0xc8/0xd8
secondary_start_kernel+0x114/0x180
CPU1: Booted secondary processor 0x0000000001 [0x431f0a11]
However we don't need to initialize steal time at CPU starting time.
We can simply wait until CPU online time, just sacrificing a bit of
accuracy by returning zero for steal time until we know better.
While at it, add __init to the functions that are only called by
pv_time_init() which is __init.
Signed-off-by: Andrew Jones <drjones@redhat.com>
Fixes: e0685fa228
("arm64: Retrieve stolen time as paravirtualized guest")
Cc: stable@vger.kernel.org
Reviewed-by: Steven Price <steven.price@arm.com>
Link: https://lore.kernel.org/r/20200916154530.40809-1-drjones@redhat.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
163 lines
3.4 KiB
C
163 lines
3.4 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
|
|
/*
|
|
*
|
|
* Copyright (C) 2013 Citrix Systems
|
|
*
|
|
* Author: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
|
|
*/
|
|
|
|
#define pr_fmt(fmt) "arm-pv: " fmt
|
|
|
|
#include <linux/arm-smccc.h>
|
|
#include <linux/cpuhotplug.h>
|
|
#include <linux/export.h>
|
|
#include <linux/io.h>
|
|
#include <linux/jump_label.h>
|
|
#include <linux/printk.h>
|
|
#include <linux/psci.h>
|
|
#include <linux/reboot.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/types.h>
|
|
|
|
#include <asm/paravirt.h>
|
|
#include <asm/pvclock-abi.h>
|
|
#include <asm/smp_plat.h>
|
|
|
|
/* Keys consulted by the scheduler's steal-time accounting core. */
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;

/* Paravirt operations table; steal_clock is filled in by pv_time_init(). */
struct paravirt_patch_template pv_ops;
EXPORT_SYMBOL_GPL(pv_ops);

/*
 * Per-CPU kernel mapping of the stolen-time structure shared with the
 * hypervisor. kaddr is NULL until the CPU's online callback maps it.
 */
struct pv_time_stolen_time_region {
	struct pvclock_vcpu_stolen_time *kaddr;
};

static DEFINE_PER_CPU(struct pv_time_stolen_time_region, stolen_time_region);

/* Steal-time accounting in the scheduler runqueue; see "no-steal-acc". */
static bool steal_acc = true;
|
|
/*
 * "no-steal-acc" on the kernel command line disables steal-time
 * accounting in the scheduler (the clock itself stays available).
 */
static int __init parse_no_stealacc(char *arg)
{
	steal_acc = false;

	return 0;
}
|
|
|
|
early_param("no-steal-acc", parse_no_stealacc);
|
|
|
|
/* return stolen time in ns by asking the hypervisor */
|
|
static u64 pv_steal_clock(int cpu)
|
|
{
|
|
struct pv_time_stolen_time_region *reg;
|
|
|
|
reg = per_cpu_ptr(&stolen_time_region, cpu);
|
|
|
|
/*
|
|
* paravirt_steal_clock() may be called before the CPU
|
|
* online notification callback runs. Until the callback
|
|
* has run we just return zero.
|
|
*/
|
|
if (!reg->kaddr)
|
|
return 0;
|
|
|
|
return le64_to_cpu(READ_ONCE(reg->kaddr->stolen_time));
|
|
}
|
|
|
|
static int stolen_time_cpu_down_prepare(unsigned int cpu)
|
|
{
|
|
struct pv_time_stolen_time_region *reg;
|
|
|
|
reg = this_cpu_ptr(&stolen_time_region);
|
|
if (!reg->kaddr)
|
|
return 0;
|
|
|
|
memunmap(reg->kaddr);
|
|
memset(reg, 0, sizeof(*reg));
|
|
|
|
return 0;
|
|
}
|
|
|
|
static int stolen_time_cpu_online(unsigned int cpu)
|
|
{
|
|
struct pv_time_stolen_time_region *reg;
|
|
struct arm_smccc_res res;
|
|
|
|
reg = this_cpu_ptr(&stolen_time_region);
|
|
|
|
arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_TIME_ST, &res);
|
|
|
|
if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
|
|
return -EINVAL;
|
|
|
|
reg->kaddr = memremap(res.a0,
|
|
sizeof(struct pvclock_vcpu_stolen_time),
|
|
MEMREMAP_WB);
|
|
|
|
if (!reg->kaddr) {
|
|
pr_warn("Failed to map stolen time data structure\n");
|
|
return -ENOMEM;
|
|
}
|
|
|
|
if (le32_to_cpu(reg->kaddr->revision) != 0 ||
|
|
le32_to_cpu(reg->kaddr->attributes) != 0) {
|
|
pr_warn_once("Unexpected revision or attributes in stolen time data\n");
|
|
return -ENXIO;
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
/*
 * Register the CPU hotplug callbacks that map/unmap the per-CPU
 * stolen-time regions. Returns 0 on success, negative errno otherwise.
 */
static int __init pv_time_init_stolen_time(void)
{
	int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				    "hypervisor/arm/pvtime:online",
				    stolen_time_cpu_online,
				    stolen_time_cpu_down_prepare);

	/* A dynamic state id (>= 0) means success; squash it to 0. */
	return ret < 0 ? ret : 0;
}
|
|
|
|
/* Probe, via SMCCC, whether the hypervisor offers the PV stolen-time ABI. */
static bool __init has_pv_steal_clock(void)
{
	struct arm_smccc_res res;

	/* To detect the presence of PV time support we require SMCCC 1.1+ */
	if (arm_smccc_1_1_get_conduit() == SMCCC_CONDUIT_NONE)
		return false;

	/* First: is the PV-time feature-discovery call itself implemented? */
	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_HV_PV_TIME_FEATURES, &res);
	if (res.a0 != SMCCC_RET_SUCCESS)
		return false;

	/* Second: does it advertise the stolen-time (ST) function? */
	arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_TIME_FEATURES,
			     ARM_SMCCC_HV_PV_TIME_ST, &res);

	return res.a0 == SMCCC_RET_SUCCESS;
}
|
|
|
|
/*
 * Boot-time entry point: if the hypervisor supports PV stolen time,
 * hook up pv_steal_clock and enable scheduler steal-time accounting.
 * Returns 0 when PV time is unavailable or set up, negative errno on
 * a setup failure.
 */
int __init pv_time_init(void)
{
	int err;

	if (!has_pv_steal_clock())
		return 0;

	err = pv_time_init_stolen_time();
	if (err)
		return err;

	pv_ops.time.steal_clock = pv_steal_clock;

	static_key_slow_inc(&paravirt_steal_enabled);
	if (steal_acc)
		static_key_slow_inc(&paravirt_steal_rq_enabled);

	pr_info("using stolen time PV\n");

	return 0;
}
|