powerpc/64s: move stack SLB pinning out of line from _switch
The large hunk of SLB pinning in _switch asm code makes it more difficult
to see everything else that's going on. It is a less important path now, so
icache and fetch footprint overhead can be avoided. Move context switch
stack SLB pinning out of line.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20230606132447.315714-2-npiggin@gmail.com
parent 31b4f69dba
commit d6b87c3eb6
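For readers less familiar with the pattern, here is a minimal C sketch of what
"moving out of line" buys: the rarely needed work is pushed into a separate,
non-inlined function so the hot caller stays small in the instruction cache and
fetch stream. This is only an analogy, not the commit's code; the real change is
hand-written powerpc assembly, where _switch now reaches the helper with
"bl pin_stack_slb" under an MMU feature section (second hunk below). The names
switch_stack and pin_stack_slow here are made up for illustration.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical cold helper: noinline/cold (GCC/Clang attributes) keep
     * its body out of the caller and away from the hot text. */
    __attribute__((noinline, cold))
    static void pin_stack_slow(unsigned long new_sp)
    {
        printf("pinning stack for sp=%#lx (rare path)\n", new_sp);
    }

    static void switch_stack(unsigned long new_sp, bool need_pin)
    {
        /* __builtin_expect plays the role of the kernel's unlikely() */
        if (__builtin_expect(need_pin, 0))
            pin_stack_slow(new_sp);      /* one small out-of-line call */
        /* ...the rest of the hot context-switch path runs straight-line... */
    }

    int main(void)
    {
        switch_stack(0x1000, false);     /* common case: helper never fetched */
        switch_stack(0x2000, true);      /* rare case: branch out of line */
        return 0;
    }

The diff does the same thing structurally: the SLB pinning body becomes a
standalone pin_stack_slb routine (first hunk), and _switch shrinks to a single
conditional bl (second hunk).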
@@ -105,6 +105,64 @@ flush_branch_caches:
 	.endr
 
 	blr
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
+	.balign	32
+/*
+ * New stack pointer in r8, old stack pointer in r1, must not clobber r3
+ */
+pin_stack_slb:
+BEGIN_FTR_SECTION
+	clrrdi	r6,r8,28	/* get its ESID */
+	clrrdi	r9,r1,28	/* get current sp ESID */
+FTR_SECTION_ELSE
+	clrrdi	r6,r8,40	/* get its 1T ESID */
+	clrrdi	r9,r1,40	/* get current sp 1T ESID */
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
+	clrldi.	r0,r6,2		/* is new ESID c00000000? */
+	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
+	cror	eq,4*cr1+eq,eq
+	beq	2f		/* if yes, don't slbie it */
+
+	/* Bolt in the new stack SLB entry */
+	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
+	oris	r0,r6,(SLB_ESID_V)@h
+	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
+BEGIN_FTR_SECTION
+	li	r9,MMU_SEGSIZE_1T	/* insert B field */
+	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
+	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
+
+	/* Update the last bolted SLB.  No write barriers are needed
+	 * here, provided we only update the current CPU's SLB shadow
+	 * buffer.
+	 */
+	ld	r9,PACA_SLBSHADOWPTR(r13)
+	li	r12,0
+	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
+	li	r12,SLBSHADOW_STACKVSID
+	STDX_BE	r7,r12,r9			/* Save VSID */
+	li	r12,SLBSHADOW_STACKESID
+	STDX_BE	r0,r12,r9			/* Save ESID */
+
+	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
+	 * we have 1TB segments, the only CPUs known to have the errata
+	 * only support less than 1TB of system memory and we'll never
+	 * actually hit this code path.
+	 */
+
+	isync
+	slbie	r6
+BEGIN_FTR_SECTION
+	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+	slbmte	r7,r0
+	isync
+2:	blr
+	.size pin_stack_slb,.-pin_stack_slb
+#endif /* CONFIG_PPC_64S_HASH_MMU */
+
 #else
 #define FLUSH_COUNT_CACHE
 #endif /* CONFIG_PPC_BOOK3S_64 */
@@ -182,59 +240,12 @@ _GLOBAL(_switch)
 #endif
 
 	ld	r8,KSP(r4)	/* new stack pointer */
+
 #ifdef CONFIG_PPC_64S_HASH_MMU
 BEGIN_MMU_FTR_SECTION
-	b	2f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
-BEGIN_FTR_SECTION
-	clrrdi	r6,r8,28	/* get its ESID */
-	clrrdi	r9,r1,28	/* get current sp ESID */
-FTR_SECTION_ELSE
-	clrrdi	r6,r8,40	/* get its 1T ESID */
-	clrrdi	r9,r1,40	/* get current sp 1T ESID */
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
-	clrldi.	r0,r6,2		/* is new ESID c00000000? */
-	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
-	cror	eq,4*cr1+eq,eq
-	beq	2f		/* if yes, don't slbie it */
-
-	/* Bolt in the new stack SLB entry */
-	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
-	oris	r0,r6,(SLB_ESID_V)@h
-	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
-BEGIN_FTR_SECTION
-	li	r9,MMU_SEGSIZE_1T	/* insert B field */
-	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
-	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
-
-	/* Update the last bolted SLB.  No write barriers are needed
-	 * here, provided we only update the current CPU's SLB shadow
-	 * buffer.
-	 */
-	ld	r9,PACA_SLBSHADOWPTR(r13)
-	li	r12,0
-	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
-	li	r12,SLBSHADOW_STACKVSID
-	STDX_BE	r7,r12,r9			/* Save VSID */
-	li	r12,SLBSHADOW_STACKESID
-	STDX_BE	r0,r12,r9			/* Save ESID */
-
-	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
-	 * we have 1TB segments, the only CPUs known to have the errata
-	 * only support less than 1TB of system memory and we'll never
-	 * actually hit this code path.
-	 */
-
-	isync
-	slbie	r6
-BEGIN_FTR_SECTION
-	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
-	slbmte	r7,r0
-	isync
-2:
-#endif /* CONFIG_PPC_64S_HASH_MMU */
+	bl	pin_stack_slb
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
+#endif
 
 	clrrdi	r7, r8, THREAD_SHIFT	/* base of new stack */
 	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE