RISC-V: Fix IPI/RFENCE hmask on non-monotonic hartid ordering
If the boot CPU does not have the lowest hartid, "hartid - hbase" can
become negative, leading to an incorrect hmask, causing userspace to
crash with SEGV. This is observed on e.g. Starlight Beta, where cpuid 1
maps to hartid 0, and cpuid 0 maps to hartid 1.
Fix this by detecting this case, and shifting the accumulated mask and
updating hbase, if possible.
Fixes: 26fb751ca3 ("RISC-V: Do not use cpumask data structure for hartid bitmap")
Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Tested-by: Atish Patra <atishp@rivosinc.com>
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
commit 5feef64f4c
parent 2b35d5b7d1
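
To make the failure mode concrete before the diff: in the old loop, the first hart visited sets hbase, and on Starlight Beta that is hartid 1, so the next iteration evaluates BIT(hartid - hbase) with hartid 0 and an effectively negative (wrapped) shift count, producing a bogus hmask. The following is a minimal user-space sketch, not the kernel code: the cpuid-to-hartid table, the send_ipi() stub and main() are invented for illustration. It mirrors the accumulation logic introduced by this patch, showing the mask being shifted and re-based when a lower hartid turns up.

/*
 * Minimal user-space sketch of the fixed accumulation loop; NOT the
 * kernel code.  The cpuid-to-hartid table, send_ipi() stub and main()
 * are made up for illustration.
 */
#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BIT(n)		(1UL << (n))

/* Mapping as on Starlight Beta: cpuid 0 -> hartid 1, cpuid 1 -> hartid 0 */
static const unsigned long cpuid_to_hartid[] = { 1, 0 };

/* Stand-in for the SBI IPI ecall: just report what would be sent */
static void send_ipi(unsigned long hmask, unsigned long hbase)
{
	printf("IPI: hbase=%lu hmask=0x%lx\n", hbase, hmask);
}

int main(void)
{
	unsigned long hartid, hmask = 0, hbase = 0, htop = 0;
	unsigned int cpuid;

	for (cpuid = 0; cpuid < 2; cpuid++) {
		hartid = cpuid_to_hartid[cpuid];
		if (hmask) {
			if (hartid + BITS_PER_LONG <= htop ||
			    hbase + BITS_PER_LONG <= hartid) {
				/* hartid cannot share a BITS_PER_LONG window
				 * with the accumulated bits: flush the batch */
				send_ipi(hmask, hbase);
				hmask = 0;
			} else if (hartid < hbase) {
				/* shift the existing bits up so they stay
				 * relative to the new, lower base */
				hmask <<= hbase - hartid;
				hbase = hartid;
			}
		}
		if (!hmask) {
			hbase = hartid;
			htop = hartid;
		} else if (hartid > htop) {
			htop = hartid;
		}
		hmask |= BIT(hartid - hbase);
	}
	if (hmask)
		send_ipi(hmask, hbase);	/* prints hbase=0, hmask=0x3 */
	return 0;
}

Running the sketch ends with a single IPI for hbase=0 and hmask=0x3, i.e. both harts in one window. The htop variable records the highest hartid already accumulated, so the loop can tell when a newly seen lower hartid is too far below the existing bits to be folded in by shifting, in which case the batch is flushed instead.
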
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
@@ -250,7 +250,7 @@ static void __sbi_set_timer_v02(uint64_t stime_value)
 
 static int __sbi_send_ipi_v02(const struct cpumask *cpu_mask)
 {
-	unsigned long hartid, cpuid, hmask = 0, hbase = 0;
+	unsigned long hartid, cpuid, hmask = 0, hbase = 0, htop = 0;
 	struct sbiret ret = {0};
 	int result;
 
@@ -259,16 +259,27 @@ static int __sbi_send_ipi_v02(const struct cpumask *cpu_mask)
 
 	for_each_cpu(cpuid, cpu_mask) {
 		hartid = cpuid_to_hartid_map(cpuid);
-		if (hmask && ((hbase + BITS_PER_LONG) <= hartid)) {
-			ret = sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_IPI,
-					hmask, hbase, 0, 0, 0, 0);
-			if (ret.error)
-				goto ecall_failed;
-			hmask = 0;
-			hbase = 0;
+		if (hmask) {
+			if (hartid + BITS_PER_LONG <= htop ||
+			    hbase + BITS_PER_LONG <= hartid) {
+				ret = sbi_ecall(SBI_EXT_IPI,
+						SBI_EXT_IPI_SEND_IPI, hmask,
+						hbase, 0, 0, 0, 0);
+				if (ret.error)
+					goto ecall_failed;
+				hmask = 0;
+			} else if (hartid < hbase) {
+				/* shift the mask to fit lower hartid */
+				hmask <<= hbase - hartid;
+				hbase = hartid;
+			}
 		}
-		if (!hmask)
+		if (!hmask) {
 			hbase = hartid;
+			htop = hartid;
+		} else if (hartid > htop) {
+			htop = hartid;
+		}
 		hmask |= BIT(hartid - hbase);
 	}
 
@@ -345,7 +356,7 @@ static int __sbi_rfence_v02(int fid, const struct cpumask *cpu_mask,
 			    unsigned long start, unsigned long size,
 			    unsigned long arg4, unsigned long arg5)
 {
-	unsigned long hartid, cpuid, hmask = 0, hbase = 0;
+	unsigned long hartid, cpuid, hmask = 0, hbase = 0, htop = 0;
 	int result;
 
 	if (!cpu_mask || cpumask_empty(cpu_mask))
@@ -353,16 +364,26 @@ static int __sbi_rfence_v02(int fid, const struct cpumask *cpu_mask,
 
 	for_each_cpu(cpuid, cpu_mask) {
 		hartid = cpuid_to_hartid_map(cpuid);
-		if (hmask && ((hbase + BITS_PER_LONG) <= hartid)) {
-			result = __sbi_rfence_v02_call(fid, hmask, hbase,
-						       start, size, arg4, arg5);
-			if (result)
-				return result;
-			hmask = 0;
-			hbase = 0;
+		if (hmask) {
+			if (hartid + BITS_PER_LONG <= htop ||
+			    hbase + BITS_PER_LONG <= hartid) {
+				result = __sbi_rfence_v02_call(fid, hmask,
+						hbase, start, size, arg4, arg5);
+				if (result)
+					return result;
+				hmask = 0;
+			} else if (hartid < hbase) {
+				/* shift the mask to fit lower hartid */
+				hmask <<= hbase - hartid;
+				hbase = hartid;
+			}
 		}
-		if (!hmask)
+		if (!hmask) {
 			hbase = hartid;
+			htop = hartid;
+		} else if (hartid > htop) {
+			htop = hartid;
+		}
 		hmask |= BIT(hartid - hbase);
 	}
 