mirror of
https://github.com/torvalds/linux.git
synced 2024-12-12 06:02:38 +00:00
RISC-V: KVM: Add G-stage ioremap() and iounmap() functions
The in-kernel AIA IMSIC support requires on-demand mapping/unmapping of Guest IMSIC addresses to Host IMSIC guest files. To help achieve this, we add kvm_riscv_gstage_ioremap() and kvm_riscv_gstage_iounmap() functions. These new functions for updating G-stage page table mappings will be called in atomic context, so we have a special "in_atomic" parameter for this purpose. Signed-off-by: Anup Patel <apatel@ventanamicro.com> Reviewed-by: Atish Patra <atishp@rivosinc.com> Signed-off-by: Anup Patel <anup@brainfault.org>
This commit is contained in:
parent
4ab0e470c0
commit
c9d57373fc
@@ -284,6 +284,11 @@ void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
 void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
 			       unsigned long hbase, unsigned long hmask);
 
+int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
+			     phys_addr_t hpa, unsigned long size,
+			     bool writable, bool in_atomic);
+void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
+			      unsigned long size);
 int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 			 struct kvm_memory_slot *memslot,
 			 gpa_t gpa, unsigned long hva, bool is_write);
@@ -343,8 +343,9 @@ static void gstage_wp_memory_region(struct kvm *kvm, int slot)
 	kvm_flush_remote_tlbs(kvm);
 }
 
-static int gstage_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
-			  unsigned long size, bool writable)
+int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
+			     phys_addr_t hpa, unsigned long size,
+			     bool writable, bool in_atomic)
 {
 	pte_t pte;
 	int ret = 0;
@@ -353,6 +354,7 @@ static int gstage_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
 	struct kvm_mmu_memory_cache pcache;
 
 	memset(&pcache, 0, sizeof(pcache));
+	pcache.gfp_custom = (in_atomic) ? GFP_ATOMIC | __GFP_ACCOUNT : 0;
 	pcache.gfp_zero = __GFP_ZERO;
 
 	end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
@@ -382,6 +384,13 @@ out:
 	return ret;
 }
 
/*
 * Tear down the G-stage mapping for the guest physical range
 * [gpa, gpa + size). The page-table walk in gstage_unmap_range()
 * is performed under kvm->mmu_lock; the final "false" argument is
 * the unmap helper's flag as called here (presumably "may block" /
 * TLB-flush behavior — confirm against gstage_unmap_range()'s
 * definition, which is outside this view).
 */
void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size)
{
	spin_lock(&kvm->mmu_lock);
	gstage_unmap_range(kvm, gpa, size, false);
	spin_unlock(&kvm->mmu_lock);
}
+
 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 					     struct kvm_memory_slot *slot,
 					     gfn_t gfn_offset,
@@ -517,8 +526,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 			goto out;
 		}
 
-		ret = gstage_ioremap(kvm, gpa, pa,
-				     vm_end - vm_start, writable);
+		ret = kvm_riscv_gstage_ioremap(kvm, gpa, pa,
+					       vm_end - vm_start,
+					       writable, false);
 		if (ret)
 			break;
 	}
Loading…
Reference in New Issue
Block a user