KVM: arm64: Move host EL1 code out of hyp/ directory
kvm/hyp/reserved_mem.c contains host code executing at EL1 and is not linked into the hypervisor object. Move the file into kvm/pkvm.c and rework the headers so that the definitions shared between the host and the hypervisor live in asm/kvm_pkvm.h.

Signed-off-by: Will Deacon <will@kernel.org>
Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20211202171048.26924-4-will@kernel.org
commit 9429f4b041
parent ed4ed15d57
arch/arm64/include/asm/kvm_pkvm.h (new file, 71 lines)
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 - Google LLC
+ * Author: Quentin Perret <qperret@google.com>
+ */
+#ifndef __ARM64_KVM_PKVM_H__
+#define __ARM64_KVM_PKVM_H__
+
+#include <linux/memblock.h>
+#include <asm/kvm_pgtable.h>
+
+#define HYP_MEMBLOCK_REGIONS 128
+
+extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
+extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);
+
+static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
+{
+	unsigned long total = 0, i;
+
+	/* Provision the worst case scenario */
+	for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) {
+		nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
+		total += nr_pages;
+	}
+
+	return total;
+}
+
+static inline unsigned long __hyp_pgtable_total_pages(void)
+{
+	unsigned long res = 0, i;
+
+	/* Cover all of memory with page-granularity */
+	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
+		struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];
+		res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
+	}
+
+	return res;
+}
+
+static inline unsigned long hyp_s1_pgtable_pages(void)
+{
+	unsigned long res;
+
+	res = __hyp_pgtable_total_pages();
+
+	/* Allow 1 GiB for private mappings */
+	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
+
+	return res;
+}
+
+static inline unsigned long host_s2_pgtable_pages(void)
+{
+	unsigned long res;
+
+	/*
+	 * Include an extra 16 pages to safely upper-bound the worst case of
+	 * concatenated pgds.
+	 */
+	res = __hyp_pgtable_total_pages() + 16;
+
+	/* Allow 1 GiB for MMIO mappings */
+	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
+
+	return res;
+}
+
+#endif /* __ARM64_KVM_PKVM_H__ */
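For a sense of what these helpers provision: __hyp_pgtable_max_pages() charges one table page per PTRS_PER_PTE entries at every level, so the cost is a rapidly shrinking geometric series. The standalone sketch below mirrors that helper under assumed 4KiB-granule constants (PAGE_SHIFT = 12, PTRS_PER_PTE = 512, KVM_PGTABLE_MAX_LEVELS = 4; illustrative values, not taken from this patch). Mapping 1GiB at page granularity then needs at most 512 + 1 + 1 + 1 = 515 table pages.

#include <stdio.h>

/* Assumed 4KiB-granule constants, for illustration only. */
#define PAGE_SHIFT		12
#define PTRS_PER_PTE		512
#define KVM_PGTABLE_MAX_LEVELS	4
#define SZ_1G			(1UL << 30)
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Same shape as __hyp_pgtable_max_pages(): one table page per PTRS_PER_PTE entries, per level. */
static unsigned long pgtable_max_pages(unsigned long nr_pages)
{
	unsigned long total = 0, i;

	for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) {
		nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
		total += nr_pages;
	}
	return total;
}

int main(void)
{
	/* 1GiB -> 262144 4KiB pages -> 512 + 1 + 1 + 1 = 515 table pages */
	printf("%lu\n", pgtable_max_pages(SZ_1G >> PAGE_SHIFT));
	return 0;
}

This is also why hyp_s1_pgtable_pages() and host_s2_pgtable_pages() each add __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT) on top of the memblock-derived total: it budgets worst-case tables for a further 1GiB of private or MMIO mappings.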
arch/arm64/kvm/Makefile
@@ -15,7 +15,7 @@ kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
	 arm.o mmu.o mmio.o psci.o perf.o hypercalls.o pvtime.o \
	 inject_fault.o va_layout.o handle_exit.o \
	 guest.o debug.o reset.o sys_regs.o \
-	 vgic-sys-reg-v3.o fpsimd.o pmu.o \
+	 vgic-sys-reg-v3.o fpsimd.o pmu.o pkvm.o \
	 arch_timer.o trng.o\
	 vgic/vgic.o vgic/vgic-init.o \
	 vgic/vgic-irqfd.o vgic/vgic-v2.o \
arch/arm64/kvm/hyp/Makefile
@@ -10,4 +10,4 @@ subdir-ccflags-y := -I$(incdir) \
	-DDISABLE_BRANCH_PROFILING \
	$(DISABLE_STACKLEAK_PLUGIN)

-obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o reserved_mem.o
+obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o
arch/arm64/kvm/hyp/include/nvhe/mm.h
@@ -10,9 +10,6 @@
 #include <nvhe/memory.h>
 #include <nvhe/spinlock.h>

-#define HYP_MEMBLOCK_REGIONS 128
-extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
-extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);
 extern struct kvm_pgtable pkvm_pgtable;
 extern hyp_spinlock_t pkvm_pgd_lock;
 extern struct hyp_pool hpool;
@@ -39,58 +36,4 @@ static inline void hyp_vmemmap_range(phys_addr_t phys, unsigned long size,
 	*end = ALIGN(*end, PAGE_SIZE);
 }

-static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
-{
-	unsigned long total = 0, i;
-
-	/* Provision the worst case scenario */
-	for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) {
-		nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
-		total += nr_pages;
-	}
-
-	return total;
-}
-
-static inline unsigned long __hyp_pgtable_total_pages(void)
-{
-	unsigned long res = 0, i;
-
-	/* Cover all of memory with page-granularity */
-	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
-		struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];
-		res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
-	}
-
-	return res;
-}
-
-static inline unsigned long hyp_s1_pgtable_pages(void)
-{
-	unsigned long res;
-
-	res = __hyp_pgtable_total_pages();
-
-	/* Allow 1 GiB for private mappings */
-	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
-
-	return res;
-}
-
-static inline unsigned long host_s2_pgtable_pages(void)
-{
-	unsigned long res;
-
-	/*
-	 * Include an extra 16 pages to safely upper-bound the worst case of
-	 * concatenated pgds.
-	 */
-	res = __hyp_pgtable_total_pages() + 16;
-
-	/* Allow 1 GiB for MMIO mappings */
-	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
-
-	return res;
-}
-
 #endif /* __KVM_HYP_MM_H */
arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -9,6 +9,7 @@
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_pgtable.h>
+#include <asm/kvm_pkvm.h>
 #include <asm/stage2_pgtable.h>

 #include <hyp/fault.h>
arch/arm64/kvm/hyp/nvhe/mm.c
@@ -8,6 +8,7 @@
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_pgtable.h>
+#include <asm/kvm_pkvm.h>
 #include <asm/spectre.h>

 #include <nvhe/early_alloc.h>
arch/arm64/kvm/hyp/nvhe/setup.c
@@ -8,6 +8,7 @@
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_pgtable.h>
+#include <asm/kvm_pkvm.h>

 #include <nvhe/early_alloc.h>
 #include <nvhe/fixed_config.h>
arch/arm64/kvm/pkvm.c (renamed from arch/arm64/kvm/hyp/reserved_mem.c)
@@ -8,10 +8,9 @@
 #include <linux/memblock.h>
 #include <linux/sort.h>

-#include <asm/kvm_host.h>
+#include <asm/kvm_pkvm.h>

-#include <nvhe/memory.h>
-#include <nvhe/mm.h>
+#include "hyp_constants.h"

 static struct memblock_region *hyp_memory = kvm_nvhe_sym(hyp_memory);
 static unsigned int *hyp_memblock_nr_ptr = &kvm_nvhe_sym(hyp_memblock_nr);
@@ -82,7 +81,8 @@ void __init kvm_hyp_reserve(void)
 	do {
 		prev = nr_pages;
 		nr_pages = hyp_mem_pages + prev;
-		nr_pages = DIV_ROUND_UP(nr_pages * sizeof(struct hyp_page), PAGE_SIZE);
+		nr_pages = DIV_ROUND_UP(nr_pages * STRUCT_HYP_PAGE_SIZE,
+					PAGE_SIZE);
 		nr_pages += __hyp_pgtable_max_pages(nr_pages);
 	} while (nr_pages != prev);
 	hyp_mem_pages += nr_pages;
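The do/while above is a fixed-point iteration: the extra pages reserved here also need struct hyp_page entries in the hyp vmemmap, and the vmemmap pages in turn need page-table pages, so each estimate feeds back into the next until the total stops growing. Because every round's increment shrinks sharply (by PAGE_SIZE / STRUCT_HYP_PAGE_SIZE for the vmemmap and by PTRS_PER_PTE per table level), the loop settles after a few iterations. A standalone sketch under assumed constants (4KiB pages and a 64-byte struct hyp_page; both are assumptions for illustration, not values from this patch):

#include <stdio.h>

/* Assumed constants, for illustration only. */
#define PAGE_SIZE		4096UL
#define PTRS_PER_PTE		512
#define KVM_PGTABLE_MAX_LEVELS	4
#define STRUCT_HYP_PAGE_SIZE	64UL	/* assumed sizeof(struct hyp_page) */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Worst-case page-table pages needed to map nr_pages, as in __hyp_pgtable_max_pages(). */
static unsigned long pgtable_max_pages(unsigned long nr_pages)
{
	unsigned long total = 0, i;

	for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) {
		nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
		total += nr_pages;
	}
	return total;
}

int main(void)
{
	unsigned long hyp_mem_pages = 1UL << 18;	/* pretend 1GiB is already accounted for */
	unsigned long nr_pages = 0, prev;

	/* Same shape as the loop in kvm_hyp_reserve() */
	do {
		prev = nr_pages;
		nr_pages = hyp_mem_pages + prev;
		nr_pages = DIV_ROUND_UP(nr_pages * STRUCT_HYP_PAGE_SIZE, PAGE_SIZE);
		nr_pages += pgtable_max_pages(nr_pages);
	} while (nr_pages != prev);

	/* Converges in a few rounds; ~4174 extra pages for 1GiB with these assumptions */
	printf("%lu\n", nr_pages);
	return 0;
}

The replacement of sizeof(struct hyp_page) with STRUCT_HYP_PAGE_SIZE matters for the same reason as the include changes above: the host-side pkvm.c no longer pulls in the hypervisor's private <nvhe/memory.h>, so the struct size is consumed as a constant from the hyp_constants.h header included above instead.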