Last minute KVM/ARM fixes; even the generic change actually
affects nothing but ARM.

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.22 (GNU/Linux)

iQEcBAABAgAGBQJUddkVAAoJEL/70l94x66D0awIAK+Zy4CnoLdtEpWFZsuoK2YO
TXOUg3z0WNR4sD/dFMlh1jLxiBG5f/JkDHBBecSZTi+L9PcU15mrAeS+k1F4rDkw
0GNwRQas+WDElD9rRnqIeBF8A83BmunsVnCsOmn3w+xeXuB8L0HBy6Pgh/wnfHbQ
+G4gODi0JMDVcvEujN2NNBf60LcM/G3U0VIFXHHGblEIOUtNCUy9mnGRBCg75vwb
CORpMC+8JV7gFF7jVnqurYc2SyN9a6fzun1evIQJWlFN+ohU8XjkVn4JsrsHpv+E
6Eqy1wgEWLW1TQhApsh5EYkIRTvvGLgdKm5KCBu15xUw/i3OTOc1BQ0VrSEPs/Y=
=rEqk
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "Last minute KVM/ARM fixes; even the generic change actually affects
  nothing but ARM"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  kvm: fix kvm_is_mmio_pfn() and rename to kvm_is_reserved_pfn()
  arm/arm64: kvm: drop inappropriate use of kvm_is_mmio_pfn()
  arm/arm64: KVM: vgic: Fix error code in kvm_vgic_create()
  arm64: KVM: Handle traps of ICC_SRE_EL1 as RAZ/WI
  arm64: KVM: fix unmapping with 48-bit VAs
commit 3314bf6ba2
@@ -197,7 +197,8 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
 	pgd = pgdp + pgd_index(addr);
 	do {
 		next = kvm_pgd_addr_end(addr, end);
-		unmap_puds(kvm, pgd, addr, next);
+		if (!pgd_none(*pgd))
+			unmap_puds(kvm, pgd, addr, next);
 	} while (pgd++, addr = next, addr != end);
 }
 
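With 48-bit VAs the range being unmapped can cover top-level (PGD) slots that were never populated, so the walk has to skip them rather than descend into them; that is all the added pgd_none() check does. As a standalone illustration of the pattern, here is a toy two-level table walk in userspace C -- all names, types and sizes are invented and this is not the kernel code:

#include <stdio.h>
#include <stdlib.h>

#define TOP_ENTRIES  4
#define LEAF_ENTRIES 4

/* Toy "second level": a block of leaf mappings. */
struct leaf_table { int mapped[LEAF_ENTRIES]; };

static void unmap_leaves(struct leaf_table *lt)
{
	for (int i = 0; i < LEAF_ENTRIES; i++)
		lt->mapped[i] = 0;
}

/* Toy "unmap_range": skip empty top-level slots, as the pgd_none()
 * check above does, instead of dereferencing them. */
static void toy_unmap_range(struct leaf_table *top[TOP_ENTRIES])
{
	for (int i = 0; i < TOP_ENTRIES; i++) {
		if (!top[i])
			continue;
		unmap_leaves(top[i]);
	}
}

int main(void)
{
	struct leaf_table *top[TOP_ENTRIES] = { NULL };

	top[1] = calloc(1, sizeof(*top[1]));   /* only one slot populated */
	top[1]->mapped[3] = 1;

	toy_unmap_range(top);                  /* empty slots are skipped */
	printf("slot 1, entry 3 is now %d\n", top[1]->mapped[3]);

	free(top[1]);
	return 0;
}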
@@ -834,6 +835,11 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
 	return kvm_vcpu_dabt_iswrite(vcpu);
 }
 
+static bool kvm_is_device_pfn(unsigned long pfn)
+{
+	return !pfn_valid(pfn);
+}
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			  struct kvm_memory_slot *memslot, unsigned long hva,
 			  unsigned long fault_status)
@@ -904,7 +910,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (is_error_pfn(pfn))
 		return -EFAULT;
 
-	if (kvm_is_mmio_pfn(pfn))
+	if (kvm_is_device_pfn(pfn))
 		mem_type = PAGE_S2_DEVICE;
 
 	spin_lock(&kvm->mmu_lock);
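The previous two hunks separate a question that the old kvm_is_mmio_pfn() was being asked to answer in two different senses: whether a pfn may be refcounted at all (now the generic kvm_is_reserved_pfn(), changed further down) and whether a guest mapping should use device memory attributes (the new, ARM-local kvm_is_device_pfn()). A userspace toy model of that distinction, with invented types and illustrative classifications that are not taken from the patch:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of a pfn: not the kernel's types, just enough to show
 * which question each predicate answers. */
struct toy_pfn {
	bool valid;     /* pfn_valid(): backed by a struct page */
	bool reserved;  /* PageReserved() on that page          */
};

/* Mirrors the renamed generic helper: refcounting may only touch
 * non-reserved pages. */
static bool toy_is_reserved(struct toy_pfn p)
{
	if (p.valid)
		return p.reserved;
	return true;            /* no struct page at all */
}

/* Mirrors the new ARM-local helper: device attributes are only for
 * pfns with no struct page (e.g. MMIO windows). */
static bool toy_is_device(struct toy_pfn p)
{
	return !p.valid;
}

int main(void)
{
	struct toy_pfn zero_page  = { .valid = true,  .reserved = true  };
	struct toy_pfn normal_ram = { .valid = true,  .reserved = false };
	struct toy_pfn mmio_bar   = { .valid = false, .reserved = false };

	/* The zero page must not be refcounted, but it is still RAM,
	 * so it must not be mapped with device attributes either. */
	printf("zero page:  reserved=%d device=%d\n",
	       toy_is_reserved(zero_page), toy_is_device(zero_page));
	printf("normal RAM: reserved=%d device=%d\n",
	       toy_is_reserved(normal_ram), toy_is_device(normal_ram));
	printf("MMIO BAR:   reserved=%d device=%d\n",
	       toy_is_reserved(mmio_bar), toy_is_device(mmio_bar));
	return 0;
}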
@@ -424,6 +424,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	/* VBAR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
 	  NULL, reset_val, VBAR_EL1, 0 },
+
+	/* ICC_SRE_EL1 */
+	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
+	  trap_raz_wi },
+
 	/* CONTEXTIDR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
 	  access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
@@ -690,6 +695,10 @@ static const struct sys_reg_desc cp15_regs[] = {
 	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
 	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
 	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
+
+	/* ICC_SRE */
+	{ Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },
+
 	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
 };
 
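Both trap tables register ICC_SRE (the AArch64 and AArch32 encodings) with the trap_raz_wi handler: the guest's reads return zero and its writes are silently dropped. Reading SRE as 0 tells a guest that the GIC system-register interface is not available, which is the answer it should get while it is only shown a memory-mapped (GICv2-style) virtual interrupt controller. A minimal, runnable model of RAZ/WI behaviour -- invented names, not the kernel's handler:

#include <stdio.h>

/* Toy RAZ/WI register: reads return zero, writes are ignored.
 * Purely illustrative; not the kernel's trap_raz_wi(). */
static unsigned long icc_sre_read(void)
{
	return 0;			/* Read As Zero */
}

static void icc_sre_write(unsigned long val)
{
	(void)val;			/* Write Ignored */
}

int main(void)
{
	icc_sre_write(0x1);		/* guest tries to set ICC_SRE.SRE */
	printf("ICC_SRE reads back as %#lx\n", icc_sre_read());
	return 0;
}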
@@ -1563,7 +1563,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 
 	for (i = 0; i < npages; i++) {
 		pfn = gfn_to_pfn(kvm, base_gfn + i);
-		if (!kvm_is_mmio_pfn(pfn)) {
+		if (!kvm_is_reserved_pfn(pfn)) {
 			kvm_set_pmt_entry(kvm, base_gfn + i,
 				pfn << PAGE_SHIFT,
 				_PAGE_AR_RWX | _PAGE_MA_WB);
@@ -630,7 +630,7 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
 	 * kvm mmu, before reclaiming the page, we should
 	 * unmap it from mmu first.
 	 */
-	WARN_ON(!kvm_is_mmio_pfn(pfn) && !page_count(pfn_to_page(pfn)));
+	WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
 
 	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
@@ -2461,7 +2461,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		spte |= PT_PAGE_SIZE_MASK;
 	if (tdp_enabled)
 		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
-			kvm_is_mmio_pfn(pfn));
+			kvm_is_reserved_pfn(pfn));
 
 	if (host_writable)
 		spte |= SPTE_HOST_WRITEABLE;
@@ -2737,7 +2737,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
 	 * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
 	 * here.
 	 */
-	if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
+	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
 	    level == PT_PAGE_TABLE_LEVEL &&
 	    PageTransCompound(pfn_to_page(pfn)) &&
 	    !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
@@ -703,7 +703,7 @@ void kvm_arch_sync_events(struct kvm *kvm);
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 
-bool kvm_is_mmio_pfn(pfn_t pfn);
+bool kvm_is_reserved_pfn(pfn_t pfn);
 
 struct kvm_irq_ack_notifier {
 	struct hlist_node link;
@@ -1933,7 +1933,7 @@ out:
 
 int kvm_vgic_create(struct kvm *kvm)
 {
-	int i, vcpu_lock_idx = -1, ret = 0;
+	int i, vcpu_lock_idx = -1, ret;
 	struct kvm_vcpu *vcpu;
 
 	mutex_lock(&kvm->lock);
@@ -1948,6 +1948,7 @@ int kvm_vgic_create(struct kvm *kvm)
 	 * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
 	 * that no other VCPUs are run while we create the vgic.
 	 */
+	ret = -EBUSY;
 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		if (!mutex_trylock(&vcpu->mutex))
 			goto out_unlock;
@@ -1955,11 +1956,10 @@ int kvm_vgic_create(struct kvm *kvm)
 	}
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (vcpu->arch.has_run_once) {
-			ret = -EBUSY;
+		if (vcpu->arch.has_run_once)
 			goto out_unlock;
-		}
 	}
+	ret = 0;
 
 	spin_lock_init(&kvm->arch.vgic.lock);
 	kvm->arch.vgic.in_kernel = true;
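The vgic bug is a classic error-path pattern: ret started life as 0, so when mutex_trylock() failed, the early goto out_unlock returned "success" even though the vgic had not been created. The fix sets ret to -EBUSY before anything can fail and only clears it once every check has passed. A self-contained illustration of the pattern -- invented names and a stand-in errno value, not the kernel code:

#include <stdbool.h>
#include <stdio.h>

#define TOY_EBUSY 16	/* stand-in for the kernel's EBUSY */

static bool try_grab(int i)
{
	return i != 2;	/* pretend resource 2 is busy */
}

/* Set the error code before the loop that can fail, and only flip it
 * to 0 once every step has succeeded -- the shape of the fix above. */
static int toy_create(int nr)
{
	int i, ret;

	ret = -TOY_EBUSY;
	for (i = 0; i < nr; i++) {
		if (!try_grab(i))
			goto out;
	}
	ret = 0;
out:
	return ret;
}

int main(void)
{
	printf("toy_create(2) = %d\n", toy_create(2));	/* 0: nothing busy   */
	printf("toy_create(4) = %d\n", toy_create(4));	/* -16: slot 2 busy  */
	return 0;
}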
@@ -107,10 +107,10 @@ EXPORT_SYMBOL_GPL(kvm_rebooting);
 
 static bool largepages_enabled = true;
 
-bool kvm_is_mmio_pfn(pfn_t pfn)
+bool kvm_is_reserved_pfn(pfn_t pfn)
 {
 	if (pfn_valid(pfn))
-		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
+		return PageReserved(pfn_to_page(pfn));
 
 	return true;
 }
@@ -1321,7 +1321,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
 	else if ((vma->vm_flags & VM_PFNMAP)) {
 		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
 			vma->vm_pgoff;
-		BUG_ON(!kvm_is_mmio_pfn(pfn));
+		BUG_ON(!kvm_is_reserved_pfn(pfn));
 	} else {
 		if (async && vma_is_valid(vma, write_fault))
 			*async = true;
@@ -1427,7 +1427,7 @@ static struct page *kvm_pfn_to_page(pfn_t pfn)
 	if (is_error_noslot_pfn(pfn))
 		return KVM_ERR_PTR_BAD_PAGE;
 
-	if (kvm_is_mmio_pfn(pfn)) {
+	if (kvm_is_reserved_pfn(pfn)) {
 		WARN_ON(1);
 		return KVM_ERR_PTR_BAD_PAGE;
 	}
@@ -1456,7 +1456,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
 void kvm_release_pfn_clean(pfn_t pfn)
 {
-	if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
+	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
 		put_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
@@ -1477,7 +1477,7 @@ static void kvm_release_pfn_dirty(pfn_t pfn)
 
 void kvm_set_pfn_dirty(pfn_t pfn)
 {
-	if (!kvm_is_mmio_pfn(pfn)) {
+	if (!kvm_is_reserved_pfn(pfn)) {
 		struct page *page = pfn_to_page(pfn);
 		if (!PageReserved(page))
 			SetPageDirty(page);
@@ -1487,14 +1487,14 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
 
 void kvm_set_pfn_accessed(pfn_t pfn)
 {
-	if (!kvm_is_mmio_pfn(pfn))
+	if (!kvm_is_reserved_pfn(pfn))
 		mark_page_accessed(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
 
 void kvm_get_pfn(pfn_t pfn)
 {
-	if (!kvm_is_mmio_pfn(pfn))
+	if (!kvm_is_reserved_pfn(pfn))
 		get_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_get_pfn);