selftests/rseq: Do not skip !allowed_cpus for mm_cid
Indexing with mm_cid is incompatible with skipping disallowed cpumask,
because concurrency IDs are based on a virtual ID allocation which is
unrelated to the physical CPU mask. These issues can be reproduced by
running the rseq selftests under a taskset which excludes CPU 0, e.g.:

    taskset -c 10-20 ./run_param_test.sh

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: "Paul E. McKenney" <paulmck@kernel.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
commit d53271c059
parent 6613476e22
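For context, a minimal standalone sketch (not part of the patch) of the failure mode the message describes. It assumes it is built inside tools/testing/selftests/rseq/ against that directory's rseq.h helpers (rseq_register_current_thread(), rseq_mm_cid_available(), rseq_current_cpu_raw(), rseq_current_mm_cid()); under a restricted affinity such as "taskset -c 10-20", cpu_id lands in the allowed range while mm_cid stays in the virtual range starting at 0, so per-CPU test slots for disallowed CPUs are still used:

/* mm_cid_demo.c - hedged sketch; helper names assumed from selftests' rseq.h */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include "rseq.h"

int main(void)
{
	cpu_set_t allowed_cpus;

	if (rseq_register_current_thread()) {
		perror("rseq_register_current_thread");
		return EXIT_FAILURE;
	}
	if (!rseq_mm_cid_available()) {
		fprintf(stderr, "mm_cid not available on this kernel\n");
		return EXIT_FAILURE;
	}
	sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
	/*
	 * Under "taskset -c 10-20", cpu_id is in [10, 20] but mm_cid is in
	 * [0, 11): concurrency IDs are virtual and ignore the cpumask, so
	 * slot 0 is used even though CPU 0 is disallowed.
	 */
	printf("cpu_id=%d mm_cid=%d cpu0_allowed=%s\n",
	       rseq_current_cpu_raw(), (int)rseq_current_mm_cid(),
	       CPU_ISSET(0, &allowed_cpus) ? "yes" : "no");
	return EXIT_SUCCESS;
}

This is why the patch below keeps initializing and draining every slot in mm_cid builds, and only applies the CPU_ISSET() skip when rseq_use_cpu_index() reports cpu_id indexing.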
--- a/tools/testing/selftests/rseq/basic_percpu_ops_test.c
+++ b/tools/testing/selftests/rseq/basic_percpu_ops_test.c
@@ -24,6 +24,11 @@ bool rseq_validate_cpu_id(void)
 {
 	return rseq_mm_cid_available();
 }
+static
+bool rseq_use_cpu_index(void)
+{
+	return false;	/* Use mm_cid */
+}
 #else
 # define RSEQ_PERCPU	RSEQ_PERCPU_CPU_ID
 static
@@ -36,6 +41,11 @@ bool rseq_validate_cpu_id(void)
 {
 	return rseq_current_cpu_raw() >= 0;
 }
+static
+bool rseq_use_cpu_index(void)
+{
+	return true;	/* Use cpu_id as index. */
+}
 #endif
 
 struct percpu_lock_entry {
@@ -274,7 +284,7 @@ void test_percpu_list(void)
 	/* Generate list entries for every usable cpu. */
 	sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
 	for (i = 0; i < CPU_SETSIZE; i++) {
-		if (!CPU_ISSET(i, &allowed_cpus))
+		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
 			continue;
 		for (j = 1; j <= 100; j++) {
 			struct percpu_list_node *node;
@@ -299,7 +309,7 @@ void test_percpu_list(void)
 	for (i = 0; i < CPU_SETSIZE; i++) {
 		struct percpu_list_node *node;
 
-		if (!CPU_ISSET(i, &allowed_cpus))
+		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
 			continue;
 
 		while ((node = __percpu_list_pop(&list, i))) {
--- a/tools/testing/selftests/rseq/param_test.c
+++ b/tools/testing/selftests/rseq/param_test.c
@@ -288,6 +288,11 @@ bool rseq_validate_cpu_id(void)
 {
 	return rseq_mm_cid_available();
 }
+static
+bool rseq_use_cpu_index(void)
+{
+	return false;	/* Use mm_cid */
+}
 # ifdef TEST_MEMBARRIER
 /*
  * Membarrier does not currently support targeting a mm_cid, so
@@ -312,6 +317,11 @@ bool rseq_validate_cpu_id(void)
 {
 	return rseq_current_cpu_raw() >= 0;
 }
+static
+bool rseq_use_cpu_index(void)
+{
+	return true;	/* Use cpu_id as index. */
+}
 # ifdef TEST_MEMBARRIER
 static
 int rseq_membarrier_expedited(int cpu)
@@ -715,7 +725,7 @@ void test_percpu_list(void)
 	/* Generate list entries for every usable cpu. */
 	sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
 	for (i = 0; i < CPU_SETSIZE; i++) {
-		if (!CPU_ISSET(i, &allowed_cpus))
+		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
 			continue;
 		for (j = 1; j <= 100; j++) {
 			struct percpu_list_node *node;
@@ -752,7 +762,7 @@ void test_percpu_list(void)
 	for (i = 0; i < CPU_SETSIZE; i++) {
 		struct percpu_list_node *node;
 
-		if (!CPU_ISSET(i, &allowed_cpus))
+		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
 			continue;
 
 		while ((node = __percpu_list_pop(&list, i))) {
@@ -902,7 +912,7 @@ void test_percpu_buffer(void)
 	/* Generate list entries for every usable cpu. */
 	sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
 	for (i = 0; i < CPU_SETSIZE; i++) {
-		if (!CPU_ISSET(i, &allowed_cpus))
+		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
 			continue;
 		/* Worse-case is every item in same CPU. */
 		buffer.c[i].array =
@@ -952,7 +962,7 @@ void test_percpu_buffer(void)
 	for (i = 0; i < CPU_SETSIZE; i++) {
 		struct percpu_buffer_node *node;
 
-		if (!CPU_ISSET(i, &allowed_cpus))
+		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
 			continue;
 
 		while ((node = __percpu_buffer_pop(&buffer, i))) {
@@ -1113,7 +1123,7 @@ void test_percpu_memcpy_buffer(void)
 	/* Generate list entries for every usable cpu. */
 	sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
 	for (i = 0; i < CPU_SETSIZE; i++) {
-		if (!CPU_ISSET(i, &allowed_cpus))
+		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
 			continue;
 		/* Worse-case is every item in same CPU. */
 		buffer.c[i].array =
@@ -1160,7 +1170,7 @@ void test_percpu_memcpy_buffer(void)
 	for (i = 0; i < CPU_SETSIZE; i++) {
 		struct percpu_memcpy_buffer_node item;
 
-		if (!CPU_ISSET(i, &allowed_cpus))
+		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
 			continue;
 
 		while (__percpu_memcpy_buffer_pop(&buffer, &item, i)) {