scftorture changes for v6.13

o	Avoid divide operation.

o	Fix cleanup code waiting for IPI handlers.

o	Move memory allocations out of preempt-disable region of code
	for PREEMPT_RT compatibility.

o	Use a lockless list to avoid freeing memory while interrupts
	are disabled, again for PREEMPT_RT compatibility.

o	Make lockless list scf_add_to_free_list() correctly handle
	freeing a NULL pointer.
 -----BEGIN PGP SIGNATURE-----
 
 iQJHBAABCgAxFiEEbK7UrM+RBIrCoViJnr8S83LZ+4wFAmc5X0gTHHBhdWxtY2tA
 a2VybmVsLm9yZwAKCRCevxLzctn7jDVMEACQRdJ0NYxygGFpUzDj2Er2wdOtBG0E
 n1NOqmNX7nlBL8BzseCFa2OiVbvggE7+ynAGcqISzDLZGE6aa4/HwKLkxSGB62UV
 WMXNiJE+t4bb1TsdMwLcQnOmmDniy6ID0NIEA8YHEEZltuDNQGQfjB8ynJewwNmY
 yMU90JDwVvDVmM9+AXUqYYRAar1gR5k7jknQbnXqb+6xT/kMEu+B1z5BGiMB3Z5L
 LylobI+3OZTY417tgJU/iSeRZbLZn7Xs6pxOcJMpeFvvYMn4mkYaUX+WUOU9oTQd
 h91wGxRouTQpS41zGNI5HcqnTtevrnmtXNROyUkei1aipvnq8N9HR11UJDXWgSV4
 24dH8qZVzTv+/cWIuNA3uUH+hu7kFZztQQQeIJdenm3CBtEYIK4ssrlyXUM7U5AY
 JQOjeEzApQLht++VTjGSS3CZhODLCTQU+IeQH1ChM1EZz2M9gsv9RqKfXrnFTDnO
 6UrLNa2YCpvQCEeNj2i8TaFHZAInGTcNFHjhxd+kA4SsCDygi9PYxKq6xVadLVZs
 Kwj6kpgPpatQzZ5w7Il9RF+qTgpOnbqB52JFt3rGjQg8uALfDo5S85wurhvu6+GC
 Qy7XvDWhmUn8fZwvlRO+DABBWOYmXeAVHKxWA3VBxO3O454Pxx5IuVSW4213GFVz
 58sAl0WwK8Jscg==
 =ElHE
 -----END PGP SIGNATURE-----

Merge tag 'scftorture.2024.11.16a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu

Pull scftorture updates from Paul McKenney:

 - Avoid divide operation

 - Fix cleanup code waiting for IPI handlers

 - Move memory allocations out of preempt-disable region of code for
   PREEMPT_RT compatibility

 - Use a lockless list to avoid freeing memory while interrupts are
   disabled, again for PREEMPT_RT compatibility (see the sketch after
   this list)

 - Make lockless list scf_add_to_free_list() correctly handle freeing a
   NULL pointer
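
A minimal sketch of the lock-less-list pattern behind the last two items
(all demo_* names are illustrative, not the scftorture code itself):
objects released from atomic context are pushed onto a per-CPU llist,
and a later pass in preemptible context reclaims them with kfree().  As
in the scf_add_to_free_list() fix, the push side tolerates a NULL
pointer.

	#include <linux/llist.h>
	#include <linux/percpu.h>
	#include <linux/slab.h>

	struct demo_obj {
		struct llist_node node;	/* hypothetical payload omitted */
	};

	static DEFINE_PER_CPU(struct llist_head, demo_free_pool);

	/* Callable from IPI/atomic context: llist_add() never blocks. */
	static void demo_defer_free(struct demo_obj *p)
	{
		if (!p)		/* tolerate NULL, like scf_add_to_free_list() */
			return;
		llist_add(&p->node, this_cpu_ptr(&demo_free_pool));
	}

	/* Run later in process context, where kfree() is always legal. */
	static void demo_drain(unsigned int cpu)
	{
		struct llist_node *n = llist_del_all(&per_cpu(demo_free_pool, cpu));

		while (n) {
			struct demo_obj *p = llist_entry(n, struct demo_obj, node);

			n = n->next;
			kfree(p);
		}
	}

llist supports concurrent lock-free producers against a single
llist_del_all() consumer, which is exactly the split the patch needs:
adds happen under preempt_disable() or in IPI handlers, frees happen in
the invoker thread's loop.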

* tag 'scftorture.2024.11.16a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu:
  scftorture: Handle NULL argument passed to scf_add_to_free_list().
  scftorture: Use a lock-less list to free memory.
  scftorture: Move memory allocation outside of preempt_disable region.
  scftorture: Wait until scf_cleanup_handler() completes.
  scftorture: Avoid additional div operation.
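
The "Move memory allocation outside of preempt_disable region" change
reflects a general PREEMPT_RT rule: on RT kernels the slab allocator can
take sleeping locks even for GFP_ATOMIC requests, so kmalloc() must be
called before preemption is disabled rather than inside the atomic
section.  A sketch of that ordering (demo_invoke() is illustrative, not
kernel API):

	#include <linux/preempt.h>
	#include <linux/slab.h>
	#include <linux/smp.h>

	static int demo_invoke(void)
	{
		int *buf;

		/*
		 * Allocate first: with preemption already disabled, this
		 * call would be illegal on PREEMPT_RT even with GFP_ATOMIC.
		 */
		buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
		if (!buf)
			return -ENOMEM;

		preempt_disable();	/* atomic section: no allocation here */
		*buf = raw_smp_processor_id();
		preempt_enable();

		kfree(buf);
		return 0;
	}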
commit d7d4102f0a
Author: Linus Torvalds
Date:   2024-11-19 10:16:59 -08:00

--- a/kernel/scftorture.c
+++ b/kernel/scftorture.c
@@ -97,6 +97,7 @@ struct scf_statistics {
 static struct scf_statistics *scf_stats_p;
 static struct task_struct *scf_torture_stats_task;
 static DEFINE_PER_CPU(long long, scf_invoked_count);
+static DEFINE_PER_CPU(struct llist_head, scf_free_pool);
 
 // Data for random primitive selection
 #define SCF_PRIM_RESCHED	0
@@ -133,6 +134,7 @@ struct scf_check {
 	bool scfc_wait;
 	bool scfc_rpc;
 	struct completion scfc_completion;
+	struct llist_node scf_node;
 };
 
 // Use to wait for all threads to start.
@@ -148,6 +150,33 @@ static DEFINE_TORTURE_RANDOM_PERCPU(scf_torture_rand);
 
 extern void resched_cpu(int cpu); // An alternative IPI vector.
 
+static void scf_add_to_free_list(struct scf_check *scfcp)
+{
+	struct llist_head *pool;
+	unsigned int cpu;
+
+	if (!scfcp)
+		return;
+	cpu = raw_smp_processor_id() % nthreads;
+	pool = &per_cpu(scf_free_pool, cpu);
+	llist_add(&scfcp->scf_node, pool);
+}
+
+static void scf_cleanup_free_list(unsigned int cpu)
+{
+	struct llist_head *pool;
+	struct llist_node *node;
+	struct scf_check *scfcp;
+
+	pool = &per_cpu(scf_free_pool, cpu);
+	node = llist_del_all(pool);
+	while (node) {
+		scfcp = llist_entry(node, struct scf_check, scf_node);
+		node = node->next;
+		kfree(scfcp);
+	}
+}
+
 // Print torture statistics. Caller must ensure serialization.
 static void scf_torture_stats_print(void)
 {
@@ -296,7 +325,7 @@ out:
 		if (scfcp->scfc_rpc)
 			complete(&scfcp->scfc_completion);
 	} else {
-		kfree(scfcp);
+		scf_add_to_free_list(scfcp);
 	}
 }
 
@@ -320,10 +349,6 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_random_state *trsp)
 	struct scf_check *scfcp = NULL;
 	struct scf_selector *scfsp = scf_sel_rand(trsp);
 
-	if (use_cpus_read_lock)
-		cpus_read_lock();
-	else
-		preempt_disable();
 	if (scfsp->scfs_prim == SCF_PRIM_SINGLE || scfsp->scfs_wait) {
 		scfcp = kmalloc(sizeof(*scfcp), GFP_ATOMIC);
 		if (!scfcp) {
@@ -337,6 +362,10 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_random_state *trsp)
 			scfcp->scfc_rpc = false;
 		}
 	}
+	if (use_cpus_read_lock)
+		cpus_read_lock();
+	else
+		preempt_disable();
 	switch (scfsp->scfs_prim) {
 	case SCF_PRIM_RESCHED:
 		if (IS_BUILTIN(CONFIG_SCF_TORTURE_TEST)) {
@@ -363,7 +392,7 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_random_state *trsp)
 				scfp->n_single_wait_ofl++;
 			else
 				scfp->n_single_ofl++;
-			kfree(scfcp);
+			scf_add_to_free_list(scfcp);
 			scfcp = NULL;
 		}
 		break;
@@ -391,7 +420,7 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_random_state *trsp)
 				preempt_disable();
 		} else {
 			scfp->n_single_rpc_ofl++;
-			kfree(scfcp);
+			scf_add_to_free_list(scfcp);
 			scfcp = NULL;
 		}
 		break;
@@ -428,7 +457,7 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_random_state *trsp)
 			pr_warn("%s: Memory-ordering failure, scfs_prim: %d.\n", __func__, scfsp->scfs_prim);
 			atomic_inc(&n_mb_out_errs); // Leak rather than trash!
 		} else {
-			kfree(scfcp);
+			scf_add_to_free_list(scfcp);
 		}
 		barrier(); // Prevent race-reduction compiler optimizations.
 	}
@@ -463,7 +492,7 @@ static int scftorture_invoker(void *arg)
 
 	// Make sure that the CPU is affinitized appropriately during testing.
 	curcpu = raw_smp_processor_id();
-	WARN_ONCE(curcpu != scfp->cpu % nr_cpu_ids,
+	WARN_ONCE(curcpu != cpu,
 		  "%s: Wanted CPU %d, running on %d, nr_cpu_ids = %d\n",
 		  __func__, scfp->cpu, curcpu, nr_cpu_ids);
 
@@ -479,6 +508,8 @@ static int scftorture_invoker(void *arg)
 	VERBOSE_SCFTORTOUT("scftorture_invoker %d started", scfp->cpu);
 
 	do {
+		scf_cleanup_free_list(cpu);
+
 		scftorture_invoke_one(scfp, &rand);
 		while (cpu_is_offline(cpu) && !torture_must_stop()) {
 			schedule_timeout_interruptible(HZ / 5);
@@ -523,12 +554,15 @@ static void scf_torture_cleanup(void)
 			torture_stop_kthread("scftorture_invoker", scf_stats_p[i].task);
 		else
 			goto end;
-	smp_call_function(scf_cleanup_handler, NULL, 0);
+	smp_call_function(scf_cleanup_handler, NULL, 1);
 	torture_stop_kthread(scf_torture_stats, scf_torture_stats_task);
 	scf_torture_stats_print(); // -After- the stats thread is stopped!
 	kfree(scf_stats_p); // -After- the last stats print has completed!
 	scf_stats_p = NULL;
+
+	for (i = 0; i < nr_cpu_ids; i++)
+		scf_cleanup_free_list(i);
 
 	if (atomic_read(&n_errs) || atomic_read(&n_mb_in_errs) || atomic_read(&n_mb_out_errs))
 		scftorture_print_module_parms("End of test: FAILURE");
 	else if (torture_onoff_failures())
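
One note on the scf_torture_cleanup() hunk above: the third argument of
smp_call_function() selects whether the caller waits for the handler to
finish on all other CPUs.  Passing 1 instead of 0 is what guarantees
that scf_cleanup_handler() has completed everywhere before cleanup
proceeds and the module's memory goes away.  A sketch of the same idiom
(demo_* names are illustrative):

	#include <linux/smp.h>

	static void demo_handler(void *unused)
	{
		/* Runs in IPI context on each of the other CPUs. */
	}

	static void demo_cleanup(void)
	{
		/*
		 * wait == 1: do not return until demo_handler() has
		 * finished executing on every other online CPU.
		 */
		smp_call_function(demo_handler, NULL, 1);
	}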