KVM: selftests: Add coverage of EPT-disabled to vmx_dirty_log_test

Extend vmx_dirty_log_test to include accesses made by L2 when EPT is
disabled.

This adds explicit coverage of a bug caught by syzkaller: the TDP MMU
would clear D-bits instead of write-protecting SPTEs used to map L2,
which only happens when L1 does not enable EPT. As a result, writes
made by L2 are not reflected in the dirty log when PML is enabled:

  $ ./vmx_dirty_log_test
  Nested EPT: disabled
  ==== Test Assertion Failure ====
    x86_64/vmx_dirty_log_test.c:151: test_bit(0, bmap)
    pid=72052 tid=72052 errno=4 - Interrupted system call
    (stack trace empty)
    Page 0 incorrectly reported clean
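
For context, the host-side check that fires above looks roughly like
the sketch below. This is a simplified paraphrase of the test's
dirty-log verification loop (kvm_vm_get_dirty_log() and the
GUEST_SYNC(bool) protocol are from the selftests framework; treat the
exact slot macro and surrounding details as illustrative):

  /*
   * After each GUEST_SYNC(expect_dirty) from the guest, fetch the
   * dirty bitmap for the test memslot and assert that page 0 of
   * GUEST_TEST_MEM matches what the guest expects.
   */
  kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
  if (uc.args[1]) {
          /* The guest just wrote the page; it must be reported dirty. */
          TEST_ASSERT(test_bit(0, bmap), "Page 0 incorrectly reported clean");
  } else {
          /* No write since the last check; the page must be clean. */
          TEST_ASSERT(!test_bit(0, bmap), "Page 0 incorrectly reported dirty");
  }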

Opportunistically replace the volatile casts with {READ,WRITE}_ONCE().
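
(For reference, the {READ,WRITE}_ONCE() accessors used by the selftests
boil down to volatile accesses, so the conversion preserves behavior
while making the single-access intent explicit. A simplified sketch of
the accessors, ignoring any size/barrier handling in the real headers:)

  #define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
  #define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))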

Link: https://lore.kernel.org/kvm/000000000000c6526f06137f18cc@google.com/
Signed-off-by: David Matlack <dmatlack@google.com>
Link: https://lore.kernel.org/r/20240315230541.1635322-5-dmatlack@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
--- a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c

@@ -28,16 +28,16 @@
 #define NESTED_TEST_MEM1		0xc0001000
 #define NESTED_TEST_MEM2		0xc0002000
 
-static void l2_guest_code(void)
+static void l2_guest_code(u64 *a, u64 *b)
 {
-	*(volatile uint64_t *)NESTED_TEST_MEM1;
-	*(volatile uint64_t *)NESTED_TEST_MEM1 = 1;
+	READ_ONCE(*a);
+	WRITE_ONCE(*a, 1);
 	GUEST_SYNC(true);
 	GUEST_SYNC(false);
 
-	*(volatile uint64_t *)NESTED_TEST_MEM2 = 1;
+	WRITE_ONCE(*b, 1);
 	GUEST_SYNC(true);
-	*(volatile uint64_t *)NESTED_TEST_MEM2 = 1;
+	WRITE_ONCE(*b, 1);
 	GUEST_SYNC(true);
 	GUEST_SYNC(false);
@@ -45,17 +45,33 @@ static void l2_guest_code(void)
 	vmcall();
 }
 
+static void l2_guest_code_ept_enabled(void)
+{
+	l2_guest_code((u64 *)NESTED_TEST_MEM1, (u64 *)NESTED_TEST_MEM2);
+}
+
+static void l2_guest_code_ept_disabled(void)
+{
+	/* Access the same L1 GPAs as l2_guest_code_ept_enabled() */
+	l2_guest_code((u64 *)GUEST_TEST_MEM, (u64 *)GUEST_TEST_MEM);
+}
+
 void l1_guest_code(struct vmx_pages *vmx)
 {
 #define L2_GUEST_STACK_SIZE 64
 	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+	void *l2_rip;
 
 	GUEST_ASSERT(vmx->vmcs_gpa);
 	GUEST_ASSERT(prepare_for_vmx_operation(vmx));
 	GUEST_ASSERT(load_vmcs(vmx));
 
-	prepare_vmcs(vmx, l2_guest_code,
-		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+	if (vmx->eptp_gpa)
+		l2_rip = l2_guest_code_ept_enabled;
+	else
+		l2_rip = l2_guest_code_ept_disabled;
+
+	prepare_vmcs(vmx, l2_rip, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
 
 	GUEST_SYNC(false);
 	GUEST_ASSERT(!vmlaunch());
@@ -64,7 +80,7 @@ void l1_guest_code(struct vmx_pages *vmx)
 	GUEST_DONE();
 }
 
-int main(int argc, char *argv[])
+static void test_vmx_dirty_log(bool enable_ept)
 {
 	vm_vaddr_t vmx_pages_gva = 0;
 	struct vmx_pages *vmx;
@@ -76,8 +92,7 @@ int main(int argc, char *argv[])
 	struct ucall uc;
 	bool done = false;
 
-	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
-	TEST_REQUIRE(kvm_cpu_has_ept());
+	pr_info("Nested EPT: %s\n", enable_ept ? "enabled" : "disabled");
 
 	/* Create VM */
 	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
@@ -103,11 +118,16 @@ int main(int argc, char *argv[])
 	 *
 	 * Note that prepare_eptp should be called only after L1's GPA map
 	 * is done, meaning after the last call to virt_map.
+	 *
+	 * When EPT is disabled, the L2 guest code will still access the same
+	 * L1 GPAs as the EPT enabled case.
 	 */
-	prepare_eptp(vmx, vm, 0);
-	nested_map_memslot(vmx, vm, 0);
-	nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096);
-	nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096);
+	if (enable_ept) {
+		prepare_eptp(vmx, vm, 0);
+		nested_map_memslot(vmx, vm, 0);
+		nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096);
+		nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096);
+	}
 
 	bmap = bitmap_zalloc(TEST_MEM_PAGES);
 	host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM);
@@ -148,3 +168,15 @@ int main(int argc, char *argv[])
 		}
 	}
 }
+
+int main(int argc, char *argv[])
+{
+	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
+
+	test_vmx_dirty_log(/*enable_ept=*/false);
+
+	if (kvm_cpu_has_ept())
+		test_vmx_dirty_log(/*enable_ept=*/true);
+
+	return 0;
+}
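
With main() split this way, a single invocation exercises both
configurations; per the pr_info() above, a run on EPT-capable hardware
should report both modes:

  $ ./vmx_dirty_log_test
  Nested EPT: disabled
  Nested EPT: enabled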