KVM: arm64: Mark kvm_arm_init() and its unique descendants as __init

Tag kvm_arm_init() and its unique helper as __init, and tag data that is
only ever modified under the kvm_arm_init() umbrella as read-only after
init.

Opportunistically name the boolean param in kvm_timer_hyp_init()'s
prototype to match its definition.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20221130230934.1014142-21-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
Sean Christopherson 2022-11-30 23:09:04 +00:00 committed by Paolo Bonzini
parent 1dc0f02d53
commit 53bf620a2c

View File

@@ -1534,7 +1534,7 @@ static int kvm_init_vector_slots(void)
return 0;
}
static void cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
{
struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
unsigned long tcr;
@@ -1746,26 +1746,26 @@ static struct notifier_block hyp_init_cpu_pm_nb = {
.notifier_call = hyp_init_cpu_pm_notifier,
};
static void hyp_cpu_pm_init(void)
static void __init hyp_cpu_pm_init(void)
{
if (!is_protected_kvm_enabled())
cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
}
static void hyp_cpu_pm_exit(void)
static void __init hyp_cpu_pm_exit(void)
{
if (!is_protected_kvm_enabled())
cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
}
#else
static inline void hyp_cpu_pm_init(void)
static inline void __init hyp_cpu_pm_init(void)
{
}
static inline void hyp_cpu_pm_exit(void)
static inline void __init hyp_cpu_pm_exit(void)
{
}
#endif
static void init_cpu_logical_map(void)
static void __init init_cpu_logical_map(void)
{
unsigned int cpu;
@@ -1782,7 +1782,7 @@ static void init_cpu_logical_map(void)
#define init_psci_0_1_impl_state(config, what) \
config.psci_0_1_ ## what ## _implemented = psci_ops.what
static bool init_psci_relay(void)
static bool __init init_psci_relay(void)
{
/*
* If PSCI has not been initialized, protected KVM cannot install
@@ -1805,7 +1805,7 @@ static bool init_psci_relay(void)
return true;
}
static int init_subsystems(void)
static int __init init_subsystems(void)
{
int err = 0;
@@ -1855,13 +1855,13 @@ out:
return err;
}
static void teardown_subsystems(void)
static void __init teardown_subsystems(void)
{
kvm_unregister_perf_callbacks();
hyp_cpu_pm_exit();
}
static void teardown_hyp_mode(void)
static void __init teardown_hyp_mode(void)
{
int cpu;
@@ -1872,7 +1872,7 @@ static void teardown_hyp_mode(void)
}
}
static int do_pkvm_init(u32 hyp_va_bits)
static int __init do_pkvm_init(u32 hyp_va_bits)
{
void *per_cpu_base = kvm_ksym_ref(kvm_nvhe_sym(kvm_arm_hyp_percpu_base));
int ret;
@@ -1908,7 +1908,7 @@ static void kvm_hyp_init_symbols(void)
kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits;
}
static int kvm_hyp_init_protection(u32 hyp_va_bits)
static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
{
void *addr = phys_to_virt(hyp_mem_base);
int ret;
@@ -1929,7 +1929,7 @@ static int kvm_hyp_init_protection(u32 hyp_va_bits)
/**
* Inits Hyp-mode on all online CPUs
*/
static int init_hyp_mode(void)
static int __init init_hyp_mode(void)
{
u32 hyp_va_bits;
int cpu;
@@ -2111,7 +2111,7 @@ out_err:
return err;
}
static void _kvm_host_prot_finalize(void *arg)
static void __init _kvm_host_prot_finalize(void *arg)
{
int *err = arg;
@@ -2119,7 +2119,7 @@ static void _kvm_host_prot_finalize(void *arg)
WRITE_ONCE(*err, -EINVAL);
}
static int pkvm_drop_host_privileges(void)
static int __init pkvm_drop_host_privileges(void)
{
int ret = 0;
@@ -2132,7 +2132,7 @@ static int pkvm_drop_host_privileges(void)
return ret;
}
static int finalize_hyp_mode(void)
static int __init finalize_hyp_mode(void)
{
if (!is_protected_kvm_enabled())
return 0;
@@ -2207,7 +2207,7 @@ void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
/**
* Initialize Hyp-mode and memory mappings on all CPUs.
*/
int kvm_arm_init(void)
static __init int kvm_arm_init(void)
{
int err;
bool in_hyp_mode;