powerpc fixes for 5.12 #5

Fix a bug on pseries where spurious wakeups from H_PROD would prevent
partition migration from succeeding.

Fix oopses seen in pcpu_alloc(), caused by parallel faults of the percpu
mapping causing us to corrupt the protection key used for the mapping,
and cause a fatal key fault.

Thanks to Aneesh Kumar K.V, Murilo Opsfelder Araujo, Nathan Lynch.

-----BEGIN PGP SIGNATURE-----

iQJHBAABCAAxFiEEJFGtCPCthwEv2Y/bUevqPMjhpYAFAmBoUxUTHG1wZUBlbGxl
cm1hbi5pZC5hdQAKCRBR6+o8yOGlgDJYEAC74efyI/HCqDLCf9Q8Xu4lQbVpqBCX
JG9KJWf97mIj9Dtc1W/Uk6xx1hFxoqDutO9NoS1OkOqa8E/1c++JwHZmUDL1vVRe
V+v4zJiHzm/4Tf4hoJ1RHgHanDz9uPDXi7UWSZfh6I89f8AU51YMN2ZFKxgtDfwE
eQJWd5l63myabJ0kyQR2agJ/AEWC7U/H8q1h1hxoAj60BlWhu2PhpikQtoNok3jQ
Az962IzYrm5Hb9pIetLmgtyrsJmxwRIkmWViwuCujMxxegH335886fniCf8Lk2/W
MsrbSBcCpu/Lt39rVRKbex3cOsXMsHjlWCZRW3wArGrA9c6BW3orjdY61PHYXR81
mf/k9hC4WavgZ04d/hoS8gbFsGB3EJsO3csFuer358yFS+K9jTHfu/5KHKngXZVp
4k6JPwz4APeaDhvngkC20F4qhMQJNRA2Huvuq1VBuIOmzH8eF+/Sg0H5YKpW2Vn5
K2jLgsCa0Pq1pzQdn4hEauYwTdAc7gLpGqFNpphhyhwcS10FeQE6XH8aGXxs8mQK
+P4p4NR6YOsgMp+rrunvF1AWmqgRnZdO41cHCU1xGgX76gFbIg/E+TB1i0cEhcEY
UJIjE8jFsBMRY3A1qlOd979UnhzAshbZS0Wh4LyM8TkgYuiDoLMEXF7uO4lwljFi
nUhTwIEnBJ7NDQ==
=booR
-----END PGP SIGNATURE-----

Merge tag 'powerpc-5.12-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:

 "Fix a bug on pseries where spurious wakeups from H_PROD would prevent
  partition migration from succeeding.

  Fix oopses seen in pcpu_alloc(), caused by parallel faults of the
  percpu mapping causing us to corrupt the protection key used for the
  mapping, and cause a fatal key fault.

  Thanks to Aneesh Kumar K.V, Murilo Opsfelder Araujo, and Nathan Lynch"

* tag 'powerpc-5.12-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/mm/book3s64: Use the correct storage key value when calling H_PROTECT
  powerpc/pseries/mobility: handle premature return from H_JOIN
  powerpc/pseries/mobility: use struct for shared state
commit 9c2ef23e4d
@@ -887,7 +887,8 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot,
 
        want_v = hpte_encode_avpn(vpn, psize, ssize);
 
-       flags = (newpp & 7) | H_AVPN;
+       flags = (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO)) | H_AVPN;
+       flags |= (newpp & HPTE_R_KEY_HI) >> 48;
        if (mmu_has_feature(MMU_FTR_KERNEL_RO))
                /* Move pp0 into bit 8 (IBM 55) */
                flags |= (newpp & HPTE_R_PP0) >> 55;
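For illustration only: the stand-alone sketch below (ordinary user-space C, not kernel code) shows why masking newpp with 7 was wrong. The old expression keeps only the pp/N bits and silently drops the storage key, which is how parallel faults on the percpu mapping could reprogram the HPTE with the wrong key and lead to the fatal key fault described in the pull message. The numeric mask values and the H_AVPN value here are assumptions made up for the demo; only the symbolic names come from the hunk above.

#include <stdint.h>
#include <stdio.h>

/* Assumed bit layout for the demo -- do not treat these values as the
 * kernel's definitions; only the names appear in the hunk above. */
#define HPTE_R_PP0      0x8000000000000000UL  /* pp0 bit */
#define HPTE_R_KEY_HI   0x3000000000000000UL  /* storage key, high bits */
#define HPTE_R_KEY_LO   0x0000000000000e00UL  /* storage key, low bits */
#define HPTE_R_N        0x0000000000000004UL  /* no-execute */
#define HPTE_R_PP       0x0000000000000003UL  /* page protection */
#define H_AVPN          (1UL << 4)            /* placeholder hcall flag */

int main(void)
{
        /* A new PTE value carrying both protection bits and a non-zero key. */
        uint64_t newpp = HPTE_R_PP | HPTE_R_KEY_LO | HPTE_R_KEY_HI;

        /* Old computation: "& 7" keeps pp/N only, the key bits vanish. */
        uint64_t old_flags = (newpp & 7) | H_AVPN;

        /* Fixed computation: keep pp, N and the low key bits, then fold the
         * high key bits down into the flags word, as the hunk does. */
        uint64_t new_flags = (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO)) | H_AVPN;
        new_flags |= (newpp & HPTE_R_KEY_HI) >> 48;

        printf("old flags: 0x%llx (storage key lost)\n",
               (unsigned long long)old_flags);
        printf("new flags: 0x%llx (storage key preserved)\n",
               (unsigned long long)new_flags);
        return 0;
}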
@@ -452,12 +452,28 @@ static int do_suspend(void)
        return ret;
 }
 
+/**
+ * struct pseries_suspend_info - State shared between CPUs for join/suspend.
+ * @counter: Threads are to increment this upon resuming from suspend
+ *           or if an error is received from H_JOIN. The thread which performs
+ *           the first increment (i.e. sets it to 1) is responsible for
+ *           waking the other threads.
+ * @done: False if join/suspend is in progress. True if the operation is
+ *        complete (successful or not).
+ */
+struct pseries_suspend_info {
+       atomic_t counter;
+       bool done;
+};
+
 static int do_join(void *arg)
 {
-       atomic_t *counter = arg;
+       struct pseries_suspend_info *info = arg;
+       atomic_t *counter = &info->counter;
        long hvrc;
        int ret;
 
+retry:
        /* Must ensure MSR.EE off for H_JOIN. */
        hard_irq_disable();
        hvrc = plpar_hcall_norets(H_JOIN);
@@ -473,8 +489,20 @@ static int do_join(void *arg)
        case H_SUCCESS:
                /*
                 * The suspend is complete and this cpu has received a
-                * prod.
+                * prod, or we've received a stray prod from unrelated
+                * code (e.g. paravirt spinlocks) and we need to join
+                * again.
+                *
+                * This barrier orders the return from H_JOIN above vs
+                * the load of info->done. It pairs with the barrier
+                * in the wakeup/prod path below.
                 */
+               smp_mb();
+               if (READ_ONCE(info->done) == false) {
+                       pr_info_ratelimited("premature return from H_JOIN on CPU %i, retrying",
+                                           smp_processor_id());
+                       goto retry;
+               }
                ret = 0;
                break;
        case H_BAD_MODE:
@@ -488,6 +516,13 @@ static int do_join(void *arg)
 
        if (atomic_inc_return(counter) == 1) {
                pr_info("CPU %u waking all threads\n", smp_processor_id());
+               WRITE_ONCE(info->done, true);
+               /*
+                * This barrier orders the store to info->done vs subsequent
+                * H_PRODs to wake the other CPUs. It pairs with the barrier
+                * in the H_SUCCESS case above.
+                */
+               smp_mb();
                prod_others();
        }
        /*
@@ -535,11 +570,16 @@ static int pseries_suspend(u64 handle)
        int ret;
 
        while (true) {
-               atomic_t counter = ATOMIC_INIT(0);
+               struct pseries_suspend_info info;
                unsigned long vasi_state;
                int vasi_err;
 
-               ret = stop_machine(do_join, &counter, cpu_online_mask);
+               info = (struct pseries_suspend_info) {
+                       .counter = ATOMIC_INIT(0),
+                       .done = false,
+               };
+
+               ret = stop_machine(do_join, &info, cpu_online_mask);
                if (ret == 0)
                        break;
                /*
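The mobility hunks above publish completion through pseries_suspend_info.done before the other CPUs are prodded, and a CPU whose H_JOIN returns while done is still false treats the wakeup as a stray H_PROD and joins again. As a rough user-space analogue of that pattern (a sketch, not the kernel code): struct suspend_state, join_wait() and the thread setup below are invented for the demo, and C11 seq_cst atomics stand in for the kernel's paired smp_mb() barriers.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Illustrative stand-in for the kernel's pseries_suspend_info. */
struct suspend_state {
        atomic_int counter;   /* first thread to increment would wake the rest */
        atomic_bool done;     /* false while the join/suspend is in flight */
};

static struct suspend_state state;

/*
 * Stand-in for H_JOIN: blocks for a while and then returns, possibly before
 * the operation has completed -- the analogue of a stray H_PROD.
 */
static void join_wait(void)
{
        usleep(200 * 1000);
}

static void *do_join(void *arg)
{
        long id = (long)arg;

retry:
        join_wait();
        /*
         * Only trust the wakeup if the completion flag has been published.
         * The waker stores done=true before sending its wakeup, so a woken
         * thread that still sees done==false knows the wakeup was premature
         * and must join again.
         */
        if (!atomic_load(&state.done)) {
                fprintf(stderr, "thread %ld: premature wakeup, retrying\n", id);
                goto retry;
        }

        /* The first thread to get here is responsible for waking the others. */
        if (atomic_fetch_add(&state.counter, 1) == 0)
                fprintf(stderr, "thread %ld: resumed first, would prod the rest\n", id);
        return NULL;
}

int main(void)
{
        pthread_t threads[4];

        for (long i = 0; i < 4; i++)
                pthread_create(&threads[i], NULL, do_join, (void *)i);

        sleep(1);                          /* simulate the suspend taking a while */
        atomic_store(&state.done, true);   /* publish completion, then "prod" */

        for (int i = 0; i < 4; i++)
                pthread_join(threads[i], NULL);
        return 0;
}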