Merge branch kvm-arm64/selftest/access-tracking into kvmarm-master/next

* kvm-arm64/selftest/access-tracking:
  : .
  : Small series to add support for arm64 to access_tracking_perf_test and
  : correct a couple of bugs along the way.
  :
  : Patches courtesy of Oliver Upton.
  : .
  KVM: selftests: Build access_tracking_perf_test for arm64
  KVM: selftests: Have perf_test_util signal when to stop vCPUs

Signed-off-by: Marc Zyngier <maz@kernel.org>
commit b1d10ee156
Author: Marc Zyngier <maz@kernel.org>
Date:   2022-12-05 14:16:55 +00:00

 5 files changed, 9 insertions(+), 12 deletions(-)
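
At the core of the series is a single shared stop flag: each test's private
"done"/"run_vcpus" boolean moves into perf_test_args, and
perf_test_join_vcpu_threads() raises it on behalf of the caller. The following
is a minimal standalone sketch of that pattern, using hypothetical names, plain
pthreads in place of the selftest harness, and simplified READ_ONCE/WRITE_ONCE
macros; it illustrates the idea and is not the selftest code itself.

    #include <pthread.h>
    #include <stdbool.h>

    /* Simplified stand-ins for the kernel's READ_ONCE/WRITE_ONCE. */
    #define READ_ONCE(x)		(*(volatile typeof(x) *)&(x))
    #define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

    /* Shared state; stop_vcpus plays the role it has in perf_test_args. */
    static struct {
    	bool stop_vcpus;
    } test_args;

    static void *vcpu_worker(void *arg)
    {
    	/* Run until the joiner raises the stop flag. */
    	while (!READ_ONCE(test_args.stop_vcpus)) {
    		/* ... enter the guest and handle exits here ... */
    	}
    	return NULL;
    }

    static void join_vcpu_threads(pthread_t *threads, int nr_vcpus)
    {
    	int i;

    	/* Joining implies stopping: raise the flag, then reap the threads. */
    	WRITE_ONCE(test_args.stop_vcpus, true);
    	for (i = 0; i < nr_vcpus; i++)
    		pthread_join(threads[i], NULL);
    }

    int main(void)
    {
    	pthread_t thread;

    	WRITE_ONCE(test_args.stop_vcpus, false);
    	pthread_create(&thread, NULL, vcpu_worker, NULL);
    	/* ... test body runs here while the worker spins ... */
    	join_vcpu_threads(&thread, 1);
    	return 0;
    }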

diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
@@ -158,6 +158,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/psci_test
 TEST_GEN_PROGS_aarch64 += aarch64/vcpu_width_config
 TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
 TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq
+TEST_GEN_PROGS_aarch64 += access_tracking_perf_test
 TEST_GEN_PROGS_aarch64 += demand_paging_test
 TEST_GEN_PROGS_aarch64 += dirty_log_test
 TEST_GEN_PROGS_aarch64 += dirty_log_perf_test

diff --git a/tools/testing/selftests/kvm/access_tracking_perf_test.c b/tools/testing/selftests/kvm/access_tracking_perf_test.c
@@ -58,9 +58,6 @@ static enum {
 	ITERATION_MARK_IDLE,
 } iteration_work;
 
-/* Set to true when vCPU threads should exit. */
-static bool done;
-
 /* The iteration that was last completed by each vCPU. */
 static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
 
@@ -211,7 +208,7 @@ static bool spin_wait_for_next_iteration(int *current_iteration)
 	int last_iteration = *current_iteration;
 
 	do {
-		if (READ_ONCE(done))
+		if (READ_ONCE(perf_test_args.stop_vcpus))
 			return false;
 
 		*current_iteration = READ_ONCE(iteration);
@@ -321,9 +318,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	mark_memory_idle(vm, nr_vcpus);
 	access_memory(vm, nr_vcpus, ACCESS_READ, "Reading from idle memory");
 
-	/* Set done to signal the vCPU threads to exit */
-	done = true;
-
 	perf_test_join_vcpu_threads(nr_vcpus);
 	perf_test_destroy_vm(vm);
 }

diff --git a/tools/testing/selftests/kvm/include/perf_test_util.h b/tools/testing/selftests/kvm/include/perf_test_util.h
@@ -40,6 +40,9 @@ struct perf_test_args {
 	/* Run vCPUs in L2 instead of L1, if the architecture supports it. */
 	bool nested;
 
+	/* Test is done, stop running vCPUs. */
+	bool stop_vcpus;
+
 	struct perf_test_vcpu_args vcpu_args[KVM_MAX_VCPUS];
 };

diff --git a/tools/testing/selftests/kvm/lib/perf_test_util.c b/tools/testing/selftests/kvm/lib/perf_test_util.c
@@ -267,6 +267,7 @@ void perf_test_start_vcpu_threads(int nr_vcpus,
 	vcpu_thread_fn = vcpu_fn;
 
 	WRITE_ONCE(all_vcpu_threads_running, false);
+	WRITE_ONCE(perf_test_args.stop_vcpus, false);
 
 	for (i = 0; i < nr_vcpus; i++) {
 		struct vcpu_thread *vcpu = &vcpu_threads[i];
@@ -289,6 +290,8 @@ void perf_test_join_vcpu_threads(int nr_vcpus)
 {
 	int i;
 
+	WRITE_ONCE(perf_test_args.stop_vcpus, true);
+
 	for (i = 0; i < nr_vcpus; i++)
 		pthread_join(vcpu_threads[i].thread, NULL);
 }
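
With the stop signal folded into perf_test_join_vcpu_threads() (and re-armed in
perf_test_start_vcpu_threads()), callers no longer need an ad-hoc flag of their
own: joining the vCPU threads is itself the request to stop. The conversion of
memslot_modification_stress_test below follows directly from this.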

diff --git a/tools/testing/selftests/kvm/memslot_modification_stress_test.c b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
@@ -34,8 +34,6 @@
 static int nr_vcpus = 1;
 static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
 
-static bool run_vcpus = true;
-
 static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
 {
 	struct kvm_vcpu *vcpu = vcpu_args->vcpu;
@@ -45,7 +43,7 @@ static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
 	run = vcpu->run;
 
 	/* Let the guest access its memory until a stop signal is received */
-	while (READ_ONCE(run_vcpus)) {
+	while (!READ_ONCE(perf_test_args.stop_vcpus)) {
 		ret = _vcpu_run(vcpu);
 		TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
 
@@ -110,8 +108,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	add_remove_memslot(vm, p->memslot_modification_delay,
 			   p->nr_memslot_modifications);
 
-	run_vcpus = false;
-
 	perf_test_join_vcpu_threads(nr_vcpus);
 	pr_info("All vCPU threads joined\n");