powerpc fixes for 5.6 #3
- Three fixes for the recently added VMAP_STACK on 32-bit.
- Three fixes related to hugepages on 8xx (32-bit).
- A fix for a bug in our transactional memory handling that could lead
  to a kernel crash if we saw a page fault during signal delivery.
- A fix for a deadlock in our PCI EEH (Enhanced Error Handling) code.
- A couple of other minor fixes.

Thanks to: Christophe Leroy, Erhard F, Frederic Barrat, Gustavo Luiz
Duarte, Larry Finger, Leonardo Bras, Oliver O'Halloran, Sam Bobroff.
-----BEGIN PGP SIGNATURE-----

iQJHBAABCAAxFiEEJFGtCPCthwEv2Y/bUevqPMjhpYAFAl5PvLYTHG1wZUBlbGxl
cm1hbi5pZC5hdQAKCRBR6+o8yOGlgAO+EAC61YFLTfFvmoPsSSXW0SjIulqogba3
qqFInT+bFKE8vmYA7VJqlNegOTmHTVc7GiKHSIB8pfhcVZbDWoFrkXUtGsXhr4BJ
2zG2l49S555ZkHZUWETXS03fWcEtc9lNhLIIzBqQKpdL2vi6wghIIqjKf0f29C8B
nouJkDOIaaD4VEW+AvTwxZHHBa2z30pYS/wprYdlaDWgPQnvZWEcMsAbO6PiWhG/
fLb9uj/GxVGjExjn+1lyp5GZD6XrXF/2wFDA3rsLsyL2KZjemOE/QQETPT3tPrSv
iWZs9C8D8IB7Lx+JAFq1OUV4LVfS79cNewXRJr+shVvohjHUtj/mVg18F7AQZ6X8
VEKl5MPAmvo9S0R1GU1gRx4fVCjMqmwnVPmu+aFN0xdLKShi1QJWj9ODGLlvgAyR
OL8X6xtvTGuK0OTZ6Fykx9//YWvwbadIRa9p2bFnpW+ptzfCqOSXcHXXYMFnFiBW
mbDogxcNlnTR395nBLD0rTjwT/XWoF8wFj18NtMOvtP29ulJgwNKSPA4DzdukqA6
BEHoCcPfUeXRKtp99Ry9oW7tEuSQMAD7JyZW/0ls31EdSHZJCxqQwlM5HTswX/pB
ZNje2NtiuLaKQTO8jtb4221ES+gU1MpYVWWBYI2Q9+ClrDP/jxh/0Gz33lHdAdD/
3Q6HcJZhBTAL1A==
=tD6n
-----END PGP SIGNATURE-----

Merge tag 'powerpc-5.6-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
 "Some more powerpc fixes for 5.6. This is two weeks worth as I was out
  sick last week:

   - Three fixes for the recently added VMAP_STACK on 32-bit.

   - Three fixes related to hugepages on 8xx (32-bit).

   - A fix for a bug in our transactional memory handling that could
     lead to a kernel crash if we saw a page fault during signal
     delivery.

   - A fix for a deadlock in our PCI EEH (Enhanced Error Handling)
     code.

   - A couple of other minor fixes.

  Thanks to: Christophe Leroy, Erhard F, Frederic Barrat, Gustavo Luiz
  Duarte, Larry Finger, Leonardo Bras, Oliver O'Halloran, Sam Bobroff"

* tag 'powerpc-5.6-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/entry: Fix an #if which should be an #ifdef in entry_32.S
  powerpc/xmon: Fix whitespace handling in getstring()
  powerpc/6xx: Fix power_save_ppc32_restore() with CONFIG_VMAP_STACK
  powerpc/chrp: Fix enter_rtas() with CONFIG_VMAP_STACK
  powerpc/32s: Fix DSI and ISI exceptions for CONFIG_VMAP_STACK
  powerpc/tm: Fix clearing MSR[TS] in current when reclaiming on signal delivery
  powerpc/8xx: Fix clearing of bits 20-23 in ITLB miss
  powerpc/hugetlb: Fix 8M hugepages on 8xx
  powerpc/hugetlb: Fix 512k hugepages on 8xx with 16k page size
  powerpc/eeh: Fix deadlock handling dead PHB
commit 2865936259
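As context for the signal/TM hunks in the diff below: handle_rt_signal32/64() and handle_signal32() now snapshot regs->msr before calling get_tm_stackpointer(), which clears MSR[TS] when it reclaims the transaction, and the later MSR_TM_ACTIVE() checks and save_tm_user_regs()/setup_tm_sigcontexts() calls use that snapshot. A minimal userspace sketch of why the ordering matters; the struct, the mask value and toy_get_tm_stackpointer() are simplified stand-ins, not the kernel's definitions:

#include <assert.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's MSR[TS] handling (not the real values). */
#define MSR_TS_MASK	0x3UL
#define MSR_TM_ACTIVE(msr)	(((msr) & MSR_TS_MASK) != 0)

struct toy_regs { unsigned long msr; };

/* Models get_tm_stackpointer(): reclaiming the transaction clears MSR[TS]. */
static unsigned long toy_get_tm_stackpointer(struct toy_regs *regs)
{
	if (MSR_TM_ACTIVE(regs->msr))
		regs->msr &= ~MSR_TS_MASK;	/* treclaim leaves us non-transactional */
	return 0xdeadbeef;			/* pretend stack pointer */
}

int main(void)
{
	struct toy_regs regs = { .msr = 0x2 };	/* transaction active */

	/* Fixed pattern: snapshot msr first, then let the helper clear MSR[TS]. */
	unsigned long msr = regs.msr;
	toy_get_tm_stackpointer(&regs);

	assert(MSR_TM_ACTIVE(msr));		/* snapshot still says "set up TM frames" */
	assert(!MSR_TM_ACTIVE(regs.msr));	/* a check on the live msr would now miss it */
	printf("snapshot=%#lx live=%#lx\n", msr, regs.msr);
	return 0;
}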
@@ -295,8 +295,13 @@ static inline bool pfn_valid(unsigned long pfn)
/*
 * Some number of bits at the level of the page table that points to
 * a hugepte are used to encode the size. This masks those bits.
 * On 8xx, HW assistance requires 4k alignment for the hugepte.
 */
#ifdef CONFIG_PPC_8xx
#define HUGEPD_SHIFT_MASK 0xfff
#else
#define HUGEPD_SHIFT_MASK 0x3f
#endif

#ifndef __ASSEMBLY__
@@ -168,6 +168,10 @@ struct thread_struct {
	unsigned long srr1;
	unsigned long dar;
	unsigned long dsisr;
#ifdef CONFIG_PPC_BOOK3S_32
	unsigned long r0, r3, r4, r5, r6, r8, r9, r11;
	unsigned long lr, ctr;
#endif
#endif
	/* Debug Registers */
	struct debug_reg debug;
@@ -132,6 +132,18 @@ int main(void)
	OFFSET(SRR1, thread_struct, srr1);
	OFFSET(DAR, thread_struct, dar);
	OFFSET(DSISR, thread_struct, dsisr);
#ifdef CONFIG_PPC_BOOK3S_32
	OFFSET(THR0, thread_struct, r0);
	OFFSET(THR3, thread_struct, r3);
	OFFSET(THR4, thread_struct, r4);
	OFFSET(THR5, thread_struct, r5);
	OFFSET(THR6, thread_struct, r6);
	OFFSET(THR8, thread_struct, r8);
	OFFSET(THR9, thread_struct, r9);
	OFFSET(THR11, thread_struct, r11);
	OFFSET(THLR, thread_struct, lr);
	OFFSET(THCTR, thread_struct, ctr);
#endif
#endif
#ifdef CONFIG_SPE
	OFFSET(THREAD_EVR0, thread_struct, evr[0]);
@@ -1184,6 +1184,17 @@ void eeh_handle_special_event(void)
			eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
			eeh_handle_normal_event(pe);
		} else {
			eeh_for_each_pe(pe, tmp_pe)
				eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
					edev->mode &= ~EEH_DEV_NO_HANDLER;

			/* Notify all devices to be down */
			eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
			eeh_set_channel_state(pe, pci_channel_io_perm_failure);
			eeh_pe_report(
				"error_detected(permanent failure)", pe,
				eeh_report_failure, NULL);

			pci_lock_rescan_remove();
			list_for_each_entry(hose, &hose_list, list_node) {
				phb_pe = eeh_phb_pe_get(hose);
@@ -1192,16 +1203,6 @@ void eeh_handle_special_event(void)
				    (phb_pe->state & EEH_PE_RECOVERING))
					continue;

				eeh_for_each_pe(pe, tmp_pe)
					eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
						edev->mode &= ~EEH_DEV_NO_HANDLER;

				/* Notify all devices to be down */
				eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
				eeh_set_channel_state(pe, pci_channel_io_perm_failure);
				eeh_pe_report(
					"error_detected(permanent failure)", pe,
					eeh_report_failure, NULL);
				bus = eeh_pe_bus_get(phb_pe);
				if (!bus) {
					pr_err("%s: Cannot find PCI bus for "
@@ -783,7 +783,7 @@ fast_exception_return:
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
#if CONFIG_PPC_BOOK3S_601
#ifdef CONFIG_PPC_BOOK3S_601
	bge	2b
#else
	bge	3f
@@ -791,7 +791,7 @@ fast_exception_return:
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
#if CONFIG_PPC_BOOK3S_601
#ifdef CONFIG_PPC_BOOK3S_601
	blt	2b
#else
	blt	3f
@@ -1354,12 +1354,17 @@ _GLOBAL(enter_rtas)
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
1:	tophys_novmstack r9, r1
#ifdef CONFIG_VMAP_STACK
	li	r0, MSR_KERNEL & ~MSR_IR	/* can take DTLB miss */
	mtmsr	r0
	isync
#endif
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	tophys(r7, r2)
	tophys_novmstack r7, r2
	stw	r0, THREAD + RTAS_SP(r7)
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
@@ -290,17 +290,55 @@ MachineCheck:
7:	EXCEPTION_PROLOG_2
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_CHRP
	bne	cr1,1f
#ifdef CONFIG_VMAP_STACK
	mfspr	r4, SPRN_SPRG_THREAD
	tovirt(r4, r4)
	lwz	r4, RTAS_SP(r4)
	cmpwi	cr1, r4, 0
#endif
	EXC_XFER_STD(0x200, machine_check_exception)
#ifdef CONFIG_PPC_CHRP
1:	b	machine_check_in_rtas
	beq	cr1, machine_check_tramp
	b	machine_check_in_rtas
#else
	b	machine_check_tramp
#endif

/* Data access exception. */
	. = 0x300
	DO_KVM	0x300
DataAccess:
#ifdef CONFIG_VMAP_STACK
	mtspr	SPRN_SPRG_SCRATCH0,r10
	mfspr	r10, SPRN_SPRG_THREAD
BEGIN_MMU_FTR_SECTION
	stw	r11, THR11(r10)
	mfspr	r10, SPRN_DSISR
	mfcr	r11
#ifdef CONFIG_PPC_KUAP
	andis.	r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
#else
	andis.	r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
#endif
	mfspr	r10, SPRN_SPRG_THREAD
	beq	hash_page_dsi
.Lhash_page_dsi_cont:
	mtcr	r11
	lwz	r11, THR11(r10)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
	mtspr	SPRN_SPRG_SCRATCH1,r11
	mfspr	r11, SPRN_DAR
	stw	r11, DAR(r10)
	mfspr	r11, SPRN_DSISR
	stw	r11, DSISR(r10)
	mfspr	r11, SPRN_SRR0
	stw	r11, SRR0(r10)
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
	stw	r11, SRR1(r10)
	mfcr	r10
	andi.	r11, r11, MSR_PR

	EXCEPTION_PROLOG_1
	b	handle_page_fault_tramp_1
#else	/* CONFIG_VMAP_STACK */
	EXCEPTION_PROLOG handle_dar_dsisr=1
	get_and_save_dar_dsisr_on_stack r4, r5, r11
BEGIN_MMU_FTR_SECTION
@@ -316,11 +354,32 @@ BEGIN_MMU_FTR_SECTION
FTR_SECTION_ELSE
	b	handle_page_fault_tramp_2
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
#endif	/* CONFIG_VMAP_STACK */

/* Instruction access exception. */
	. = 0x400
	DO_KVM	0x400
InstructionAccess:
#ifdef CONFIG_VMAP_STACK
	mtspr	SPRN_SPRG_SCRATCH0,r10
	mtspr	SPRN_SPRG_SCRATCH1,r11
	mfspr	r10, SPRN_SPRG_THREAD
	mfspr	r11, SPRN_SRR0
	stw	r11, SRR0(r10)
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
	stw	r11, SRR1(r10)
	mfcr	r10
BEGIN_MMU_FTR_SECTION
	andis.	r11, r11, SRR1_ISI_NOPT@h	/* no pte found? */
	bne	hash_page_isi
.Lhash_page_isi_cont:
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
	andi.	r11, r11, MSR_PR

	EXCEPTION_PROLOG_1
	EXCEPTION_PROLOG_2
#else	/* CONFIG_VMAP_STACK */
	EXCEPTION_PROLOG
	andis.	r0,r9,SRR1_ISI_NOPT@h	/* no pte found? */
	beq	1f			/* if so, try to put a PTE */
@@ -329,6 +388,7 @@ InstructionAccess:
BEGIN_MMU_FTR_SECTION
	bl	hash_page
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#endif	/* CONFIG_VMAP_STACK */
1:	mr	r4,r12
	andis.	r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
	stw	r4, _DAR(r11)
@@ -344,7 +404,7 @@ Alignment:
	EXCEPTION_PROLOG handle_dar_dsisr=1
	save_dar_dsisr_on_stack r4, r5, r11
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0x600, alignment_exception)
	b	alignment_exception_tramp

/* Program check exception */
	EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
@@ -645,15 +705,100 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)

	. = 0x3000

machine_check_tramp:
	EXC_XFER_STD(0x200, machine_check_exception)

alignment_exception_tramp:
	EXC_XFER_STD(0x600, alignment_exception)

handle_page_fault_tramp_1:
#ifdef CONFIG_VMAP_STACK
	EXCEPTION_PROLOG_2 handle_dar_dsisr=1
#endif
	lwz	r4, _DAR(r11)
	lwz	r5, _DSISR(r11)
	/* fall through */
handle_page_fault_tramp_2:
	EXC_XFER_LITE(0x300, handle_page_fault)

#ifdef CONFIG_VMAP_STACK
.macro save_regs_thread thread
	stw	r0, THR0(\thread)
	stw	r3, THR3(\thread)
	stw	r4, THR4(\thread)
	stw	r5, THR5(\thread)
	stw	r6, THR6(\thread)
	stw	r8, THR8(\thread)
	stw	r9, THR9(\thread)
	mflr	r0
	stw	r0, THLR(\thread)
	mfctr	r0
	stw	r0, THCTR(\thread)
.endm

.macro restore_regs_thread thread
	lwz	r0, THLR(\thread)
	mtlr	r0
	lwz	r0, THCTR(\thread)
	mtctr	r0
	lwz	r0, THR0(\thread)
	lwz	r3, THR3(\thread)
	lwz	r4, THR4(\thread)
	lwz	r5, THR5(\thread)
	lwz	r6, THR6(\thread)
	lwz	r8, THR8(\thread)
	lwz	r9, THR9(\thread)
.endm

hash_page_dsi:
	save_regs_thread r10
	mfdsisr	r3
	mfdar	r4
	mfsrr0	r5
	mfsrr1	r9
	rlwinm	r3, r3, 32 - 15, _PAGE_RW	/* DSISR_STORE -> _PAGE_RW */
	bl	hash_page
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	b	.Lhash_page_dsi_cont

hash_page_isi:
	mr	r11, r10
	mfspr	r10, SPRN_SPRG_THREAD
	save_regs_thread r10
	li	r3, 0
	lwz	r4, SRR0(r10)
	lwz	r9, SRR1(r10)
	bl	hash_page
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	mr	r10, r11
	b	.Lhash_page_isi_cont

	.globl fast_hash_page_return
fast_hash_page_return:
	andis.	r10, r9, SRR1_ISI_NOPT@h	/* Set on ISI, cleared on DSI */
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	bne	1f

	/* DSI */
	mtcr	r11
	lwz	r11, THR11(r10)
	mfspr	r10, SPRN_SPRG_SCRATCH0
	SYNC
	RFI

1:	/* ISI */
	mtcr	r11
	mfspr	r11, SPRN_SPRG_SCRATCH1
	mfspr	r10, SPRN_SPRG_SCRATCH0
	SYNC
	RFI

stack_overflow:
	vmap_stack_overflow_exception
#endif

AltiVecUnavailable:
	EXCEPTION_PROLOG
@@ -64,11 +64,25 @@
.endm

.macro EXCEPTION_PROLOG_2 handle_dar_dsisr=0
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S)
BEGIN_MMU_FTR_SECTION
	mtcr	r10
FTR_SECTION_ELSE
	stw	r10, _CCR(r11)
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
#else
	stw	r10,_CCR(r11)		/* save registers */
#endif
	mfspr	r10, SPRN_SPRG_SCRATCH0
	stw	r12,GPR12(r11)
	stw	r9,GPR9(r11)
	mfspr	r10,SPRN_SPRG_SCRATCH0
	stw	r10,GPR10(r11)
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S)
BEGIN_MMU_FTR_SECTION
	mfcr	r10
	stw	r10, _CCR(r11)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#endif
	mfspr	r12,SPRN_SPRG_SCRATCH1
	stw	r12,GPR11(r11)
	mflr	r10
@@ -83,6 +97,11 @@
	stw	r10, _DSISR(r11)
	.endif
	lwz	r9, SRR1(r12)
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S)
BEGIN_MMU_FTR_SECTION
	andi.	r10, r9, MSR_PR
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#endif
	lwz	r12, SRR0(r12)
#else
	mfspr	r12,SPRN_SRR0
@@ -256,7 +256,7 @@ InstructionTLBMiss:
	 * set. All other Linux PTE bits control the behavior
	 * of the MMU.
	 */
	rlwimi	r10, r10, 0, 0x0f00	/* Clear bits 20-23 */
	rlwinm	r10, r10, 0, ~0x0f00	/* Clear bits 20-23 */
	rlwimi	r10, r10, 4, 0x0400	/* Copy _PAGE_EXEC into bit 21 */
	ori	r10, r10, RPN_PATTERN | 0x200 /* Set 22 and 24-27 */
	mtspr	SPRN_MI_RPN, r10	/* Update TLB entry */
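A note on the one-line 8xx ITLB-miss fix above: rlwimi with a zero rotate only re-inserts r10 into itself under the 0x0f00 mask, so bits 20-23 were never actually cleared, while rlwinm with the inverted mask does clear them. A small C sketch of the two mask operations (plain integer bit-ops standing in for the rlwimi/rlwinm semantics, not the real TLB-miss code):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int pte = 0xabcd1234u | 0x0f00u;	/* arbitrary value with bits 20-23 set */

	/* What the old "rlwimi r10,r10,0,0x0f00" amounts to: re-insert the same bits. */
	unsigned int buggy = (pte & ~0x0f00u) | (pte & 0x0f00u);	/* == pte, nothing cleared */

	/* What "rlwinm r10,r10,0,~0x0f00" does: AND with the inverted mask. */
	unsigned int fixed = pte & ~0x0f00u;

	assert(buggy == pte);
	assert((fixed & 0x0f00u) == 0);
	printf("pte=%#x buggy=%#x fixed=%#x\n", pte, buggy, fixed);
	return 0;
}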
@@ -166,7 +166,11 @@ BEGIN_FTR_SECTION
	mfspr	r9,SPRN_HID0
	andis.	r9,r9,HID0_NAP@h
	beq	1f
#ifdef CONFIG_VMAP_STACK
	addis	r9, r11, nap_save_msscr0@ha
#else
	addis	r9,r11,(nap_save_msscr0-KERNELBASE)@ha
#endif
	lwz	r9,nap_save_msscr0@l(r9)
	mtspr	SPRN_MSSCR0, r9
	sync
@@ -174,7 +178,11 @@ BEGIN_FTR_SECTION
1:
END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
BEGIN_FTR_SECTION
#ifdef CONFIG_VMAP_STACK
	addis	r9, r11, nap_save_hid1@ha
#else
	addis	r9,r11,(nap_save_hid1-KERNELBASE)@ha
#endif
	lwz	r9,nap_save_hid1@l(r9)
	mtspr	SPRN_HID1, r9
END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
@@ -200,14 +200,27 @@ unsigned long get_tm_stackpointer(struct task_struct *tsk)
	 * normal/non-checkpointed stack pointer.
	 */

	unsigned long ret = tsk->thread.regs->gpr[1];

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	BUG_ON(tsk != current);

	if (MSR_TM_ACTIVE(tsk->thread.regs->msr)) {
		preempt_disable();
		tm_reclaim_current(TM_CAUSE_SIGNAL);
		if (MSR_TM_TRANSACTIONAL(tsk->thread.regs->msr))
			return tsk->thread.ckpt_regs.gpr[1];
			ret = tsk->thread.ckpt_regs.gpr[1];

		/*
		 * If we treclaim, we must clear the current thread's TM bits
		 * before re-enabling preemption. Otherwise we might be
		 * preempted and have the live MSR[TS] changed behind our back
		 * (tm_recheckpoint_new_task() would recheckpoint). Besides, we
		 * enter the signal handler in non-transactional state.
		 */
		tsk->thread.regs->msr &= ~MSR_TS_MASK;
		preempt_enable();
	}
#endif
	return tsk->thread.regs->gpr[1];
	return ret;
}
@@ -489,19 +489,11 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
 */
static int save_tm_user_regs(struct pt_regs *regs,
			     struct mcontext __user *frame,
			     struct mcontext __user *tm_frame, int sigret)
			     struct mcontext __user *tm_frame, int sigret,
			     unsigned long msr)
{
	unsigned long msr = regs->msr;

	WARN_ON(tm_suspend_disabled);

	/* Remove TM bits from thread's MSR. The MSR in the sigcontext
	 * just indicates to userland that we were doing a transaction, but we
	 * don't want to return in transactional state. This also ensures
	 * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
	 */
	regs->msr &= ~MSR_TS_MASK;

	/* Save both sets of general registers */
	if (save_general_regs(&current->thread.ckpt_regs, frame)
	    || save_general_regs(regs, tm_frame))
@@ -912,6 +904,10 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
	int sigret;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;
#endif

	BUG_ON(tsk != current);

@@ -944,13 +940,13 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_frame = &rt_sf->uc_transact.uc_mcontext;
	if (MSR_TM_ACTIVE(regs->msr)) {
	if (MSR_TM_ACTIVE(msr)) {
		if (__put_user((unsigned long)&rt_sf->uc_transact,
			       &rt_sf->uc.uc_link) ||
		    __put_user((unsigned long)tm_frame,
			       &rt_sf->uc_transact.uc_regs))
			goto badframe;
		if (save_tm_user_regs(regs, frame, tm_frame, sigret))
		if (save_tm_user_regs(regs, frame, tm_frame, sigret, msr))
			goto badframe;
	}
	else
@@ -1369,6 +1365,10 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
	int sigret;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;
#endif

	BUG_ON(tsk != current);

@@ -1402,9 +1402,9 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->mctx_transact;
	if (MSR_TM_ACTIVE(regs->msr)) {
	if (MSR_TM_ACTIVE(msr)) {
		if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
				      sigret))
				      sigret, msr))
			goto badframe;
	}
	else
@@ -192,7 +192,8 @@ static long setup_sigcontext(struct sigcontext __user *sc,
static long setup_tm_sigcontexts(struct sigcontext __user *sc,
				 struct sigcontext __user *tm_sc,
				 struct task_struct *tsk,
				 int signr, sigset_t *set, unsigned long handler)
				 int signr, sigset_t *set, unsigned long handler,
				 unsigned long msr)
{
	/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
	 * process never used altivec yet (MSR_VEC is zero in pt_regs of
@@ -207,12 +208,11 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
	elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc);
#endif
	struct pt_regs *regs = tsk->thread.regs;
	unsigned long msr = tsk->thread.regs->msr;
	long err = 0;

	BUG_ON(tsk != current);

	BUG_ON(!MSR_TM_ACTIVE(regs->msr));
	BUG_ON(!MSR_TM_ACTIVE(msr));

	WARN_ON(tm_suspend_disabled);

@@ -222,13 +222,6 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
	 */
	msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX);

	/* Remove TM bits from thread's MSR. The MSR in the sigcontext
	 * just indicates to userland that we were doing a transaction, but we
	 * don't want to return in transactional state. This also ensures
	 * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
	 */
	regs->msr &= ~MSR_TS_MASK;

#ifdef CONFIG_ALTIVEC
	err |= __put_user(v_regs, &sc->v_regs);
	err |= __put_user(tm_v_regs, &tm_sc->v_regs);
@@ -824,6 +817,10 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
	unsigned long newsp = 0;
	long err = 0;
	struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;
#endif

	BUG_ON(tsk != current);

@@ -841,7 +838,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr)) {
	if (MSR_TM_ACTIVE(msr)) {
		/* The ucontext_t passed to userland points to the second
		 * ucontext_t (for transactional state) with its uc_link ptr.
		 */
@@ -849,7 +846,8 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
		err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
					    &frame->uc_transact.uc_mcontext,
					    tsk, ksig->sig, NULL,
					    (unsigned long)ksig->ka.sa.sa_handler);
					    (unsigned long)ksig->ka.sa.sa_handler,
					    msr);
	} else
#endif
	{
@@ -25,12 +25,6 @@
#include <asm/feature-fixups.h>
#include <asm/code-patching-asm.h>

#ifdef CONFIG_VMAP_STACK
#define ADDR_OFFSET	0
#else
#define ADDR_OFFSET	PAGE_OFFSET
#endif

#ifdef CONFIG_SMP
	.section .bss
	.align	2
@@ -53,8 +47,8 @@ mmu_hash_lock:
	.text
_GLOBAL(hash_page)
#ifdef CONFIG_SMP
	lis	r8, (mmu_hash_lock - ADDR_OFFSET)@h
	ori	r8, r8, (mmu_hash_lock - ADDR_OFFSET)@l
	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@h
	ori	r8, r8, (mmu_hash_lock - PAGE_OFFSET)@l
	lis	r0,0x0fff
	b	10f
11:	lwz	r6,0(r8)
@@ -72,12 +66,9 @@ _GLOBAL(hash_page)
	cmplw	0,r4,r0
	ori	r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
	mfspr	r5, SPRN_SPRG_PGDIR	/* phys page-table root */
#ifdef CONFIG_VMAP_STACK
	tovirt(r5, r5)
#endif
	blt+	112f			/* assume user more likely */
	lis	r5, (swapper_pg_dir - ADDR_OFFSET)@ha	/* if kernel address, use */
	addi	r5 ,r5 ,(swapper_pg_dir - ADDR_OFFSET)@l	/* kernel page table */
	lis	r5, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r5 ,r5 ,(swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
112:
#ifndef CONFIG_PTE_64BIT
@@ -89,9 +80,6 @@ _GLOBAL(hash_page)
	lwzx	r8,r8,r5		/* Get L1 entry */
	rlwinm.	r8,r8,0,0,20		/* extract pt base address */
#endif
#ifdef CONFIG_VMAP_STACK
	tovirt(r8, r8)
#endif
#ifdef CONFIG_SMP
	beq-	hash_page_out		/* return if no mapping */
#else
@@ -143,30 +131,36 @@ retry:
	bne-	retry			/* retry if someone got there first */

	mfsrin	r3,r4			/* get segment reg for segment */
#ifndef CONFIG_VMAP_STACK
	mfctr	r0
	stw	r0,_CTR(r11)
#endif
	bl	create_hpte		/* add the hash table entry */

#ifdef CONFIG_SMP
	eieio
	lis	r8, (mmu_hash_lock - ADDR_OFFSET)@ha
	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@ha
	li	r0,0
	stw	r0, (mmu_hash_lock - ADDR_OFFSET)@l(r8)
	stw	r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
#endif

#ifdef CONFIG_VMAP_STACK
	b	fast_hash_page_return
#else
	/* Return from the exception */
	lwz	r5,_CTR(r11)
	mtctr	r5
	lwz	r0,GPR0(r11)
	lwz	r8,GPR8(r11)
	b	fast_exception_return
#endif

#ifdef CONFIG_SMP
hash_page_out:
	eieio
	lis	r8, (mmu_hash_lock - ADDR_OFFSET)@ha
	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@ha
	li	r0,0
	stw	r0, (mmu_hash_lock - ADDR_OFFSET)@l(r8)
	stw	r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
	blr
#endif /* CONFIG_SMP */

@@ -341,7 +335,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	patch_site	1f, patch__hash_page_A1
	patch_site	2f, patch__hash_page_A2
	/* Get the address of the primary PTE group in the hash table (r3) */
0:	lis	r0, (Hash_base - ADDR_OFFSET)@h	/* base address of hash table */
0:	lis	r0, (Hash_base - PAGE_OFFSET)@h	/* base address of hash table */
1:	rlwimi	r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
2:	rlwinm	r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r3,r3,r0		/* make primary hash */
@@ -355,10 +349,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	beq+	10f			/* no PTE: go look for an empty slot */
	tlbie	r4

	lis	r4, (htab_hash_searches - ADDR_OFFSET)@ha
	lwz	r6, (htab_hash_searches - ADDR_OFFSET)@l(r4)
	lis	r4, (htab_hash_searches - PAGE_OFFSET)@ha
	lwz	r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
	addi	r6,r6,1			/* count how many searches we do */
	stw	r6, (htab_hash_searches - ADDR_OFFSET)@l(r4)
	stw	r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
	mtctr	r0
@@ -390,10 +384,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	beq+	found_empty

	/* update counter of times that the primary PTEG is full */
	lis	r4, (primary_pteg_full - ADDR_OFFSET)@ha
	lwz	r6, (primary_pteg_full - ADDR_OFFSET)@l(r4)
	lis	r4, (primary_pteg_full - PAGE_OFFSET)@ha
	lwz	r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
	addi	r6,r6,1
	stw	r6, (primary_pteg_full - ADDR_OFFSET)@l(r4)
	stw	r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)

	patch_site	0f, patch__hash_page_C
	/* Search the secondary PTEG for an empty slot */
@@ -427,8 +421,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	 * lockup here but that shouldn't happen
	 */

1:	lis	r4, (next_slot - ADDR_OFFSET)@ha	/* get next evict slot */
	lwz	r6, (next_slot - ADDR_OFFSET)@l(r4)
1:	lis	r4, (next_slot - PAGE_OFFSET)@ha	/* get next evict slot */
	lwz	r6, (next_slot - PAGE_OFFSET)@l(r4)
	addi	r6,r6,HPTE_SIZE			/* search for candidate */
	andi.	r6,r6,7*HPTE_SIZE
	stw	r6,next_slot@l(r4)
@@ -413,7 +413,7 @@ void __init MMU_init_hw(void)
void __init MMU_init_hw_patch(void)
{
	unsigned int hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
	unsigned int hash;
	unsigned int hash = (unsigned int)Hash - PAGE_OFFSET;

	if (ppc_md.progress)
		ppc_md.progress("hash:patch", 0x345);
@@ -425,11 +425,6 @@ void __init MMU_init_hw_patch(void)
	/*
	 * Patch up the instructions in hashtable.S:create_hpte
	 */
	if (IS_ENABLED(CONFIG_VMAP_STACK))
		hash = (unsigned int)Hash;
	else
		hash = (unsigned int)Hash - PAGE_OFFSET;

	modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
	modify_instruction_site(&patch__hash_page_A1, 0x7c0, hash_mb << 6);
	modify_instruction_site(&patch__hash_page_A2, 0x7c0, hash_mb2 << 6);
@@ -439,8 +434,7 @@ void __init MMU_init_hw_patch(void)
	/*
	 * Patch up the instructions in hashtable.S:flush_hash_page
	 */
	modify_instruction_site(&patch__flush_hash_A0, 0xffff,
				((unsigned int)Hash - PAGE_OFFSET) >> 16);
	modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16);
	modify_instruction_site(&patch__flush_hash_A1, 0x7c0, hash_mb << 6);
	modify_instruction_site(&patch__flush_hash_A2, 0x7c0, hash_mb2 << 6);
	modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask);
@@ -53,20 +53,24 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
	if (pshift >= pdshift) {
		cachep = PGT_CACHE(PTE_T_ORDER);
		num_hugepd = 1 << (pshift - pdshift);
		new = NULL;
	} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
		cachep = PGT_CACHE(PTE_INDEX_SIZE);
		cachep = NULL;
		num_hugepd = 1;
		new = pte_alloc_one(mm);
	} else {
		cachep = PGT_CACHE(pdshift - pshift);
		num_hugepd = 1;
		new = NULL;
	}

	if (!cachep) {
	if (!cachep && !new) {
		WARN_ONCE(1, "No page table cache created for hugetlb tables");
		return -ENOMEM;
	}

	new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
	if (cachep)
		new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));

	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
@@ -97,7 +101,10 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
	if (i < num_hugepd) {
		for (i = i - 1 ; i >= 0; i--, hpdp--)
			*hpdp = __hugepd(0);
		kmem_cache_free(cachep, new);
		if (cachep)
			kmem_cache_free(cachep, new);
		else
			pte_free(mm, new);
	} else {
		kmemleak_ignore(new);
	}
@@ -324,8 +331,7 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
	if (shift >= pdshift)
		hugepd_free(tlb, hugepte);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		pgtable_free_tlb(tlb, hugepte,
				 get_hugepd_cache_index(PTE_INDEX_SIZE));
		pgtable_free_tlb(tlb, hugepte, 0);
	else
		pgtable_free_tlb(tlb, hugepte,
				 get_hugepd_cache_index(pdshift - shift));
@@ -639,12 +645,13 @@ static int __init hugetlbpage_init(void)
		 * if we have pdshift and shift value same, we don't
		 * use pgt cache for hugepd.
		 */
		if (pdshift > shift && IS_ENABLED(CONFIG_PPC_8xx))
			pgtable_cache_add(PTE_INDEX_SIZE);
		else if (pdshift > shift)
			pgtable_cache_add(pdshift - shift);
		else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) || IS_ENABLED(CONFIG_PPC_8xx))
		if (pdshift > shift) {
			if (!IS_ENABLED(CONFIG_PPC_8xx))
				pgtable_cache_add(pdshift - shift);
		} else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) ||
			   IS_ENABLED(CONFIG_PPC_8xx)) {
			pgtable_cache_add(PTE_T_ORDER);
		}

		configured = true;
	}
@@ -185,8 +185,7 @@ u8 __initdata early_hash[256 << 10] __aligned(256 << 10) = {0};

static void __init kasan_early_hash_table(void)
{
	unsigned int hash = IS_ENABLED(CONFIG_VMAP_STACK) ? (unsigned int)early_hash :
							    __pa(early_hash);
	unsigned int hash = __pa(early_hash);

	modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
	modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16);
@@ -3435,6 +3435,11 @@ getstring(char *s, int size)
	int c;

	c = skipbl();
	if (c == '\n') {
		*s = 0;
		return;
	}

	do {
		if( size > 1 ){
			*s++ = c;
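The xmon getstring() hunk above makes the function return an empty string when the first non-blank character is already a newline, rather than copying stray whitespace into the buffer. A standalone sketch of the same control flow run against a string; inchar() and skipbl() here only mimic xmon's console input and are not the real xmon helpers:

#include <stdio.h>
#include <string.h>

static const char *in;			/* stand-in for xmon's input cursor */

static int inchar(void) { return *in ? *in++ : '\n'; }

static int skipbl(void)			/* skip spaces/tabs, like xmon's skipbl() */
{
	int c;
	do { c = inchar(); } while (c == ' ' || c == '\t');
	return c;
}

/* Mirrors the fixed getstring(): empty result if the line is already over. */
static void getstring(char *s, int size)
{
	int c = skipbl();

	if (c == '\n') {
		*s = 0;
		return;
	}
	do {
		if (size > 1) {
			*s++ = c;
			--size;
		}
		c = inchar();
	} while (c != ' ' && c != '\t' && c != '\n');
	*s = 0;
}

int main(void)
{
	char buf[16];

	in = "   \n";			/* only whitespace before the newline */
	getstring(buf, sizeof(buf));
	printf("empty: \"%s\" (len %zu)\n", buf, strlen(buf));

	in = "  symbol  \n";
	getstring(buf, sizeof(buf));
	printf("token: \"%s\"\n", buf);
	return 0;
}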