2019-05-27 06:55:01 +00:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
2005-09-26 06:04:21 +00:00
|
|
|
/*
|
|
|
|
* Kernel execution entry point code.
|
|
|
|
*
|
|
|
|
* Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
|
2007-09-27 13:43:35 +00:00
|
|
|
* Initial PowerPC version.
|
2005-09-26 06:04:21 +00:00
|
|
|
* Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
|
2007-09-27 13:43:35 +00:00
|
|
|
* Rewritten for PReP
|
2005-09-26 06:04:21 +00:00
|
|
|
* Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
|
2007-09-27 13:43:35 +00:00
|
|
|
* Low-level exception handers, MMU support, and rewrite.
|
2005-09-26 06:04:21 +00:00
|
|
|
* Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
|
2007-09-27 13:43:35 +00:00
|
|
|
* PowerPC 8xx modifications.
|
2005-09-26 06:04:21 +00:00
|
|
|
* Copyright (c) 1998-1999 TiVo, Inc.
|
2007-09-27 13:43:35 +00:00
|
|
|
* PowerPC 403GCX modifications.
|
2005-09-26 06:04:21 +00:00
|
|
|
* Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
|
2007-09-27 13:43:35 +00:00
|
|
|
* PowerPC 403GCX/405GP modifications.
|
2005-09-26 06:04:21 +00:00
|
|
|
* Copyright 2000 MontaVista Software Inc.
|
|
|
|
* PPC405 modifications
|
2007-09-27 13:43:35 +00:00
|
|
|
* PowerPC 403GCX/405GP modifications.
|
|
|
|
* Author: MontaVista Software, Inc.
|
|
|
|
* frank_rowand@mvista.com or source@mvista.com
|
|
|
|
* debbie_chu@mvista.com
|
2005-09-26 06:04:21 +00:00
|
|
|
* Copyright 2002-2004 MontaVista Software, Inc.
|
2007-09-27 13:43:35 +00:00
|
|
|
* PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
|
2005-09-26 06:04:21 +00:00
|
|
|
* Copyright 2004 Freescale Semiconductor, Inc
|
2007-09-27 13:43:35 +00:00
|
|
|
* PowerPC e500 modifications, Kumar Gala <galak@kernel.crashing.org>
|
2005-09-26 06:04:21 +00:00
|
|
|
*/
|
|
|
|
|
2009-04-26 02:11:05 +00:00
|
|
|
#include <linux/init.h>
|
2005-09-26 06:04:21 +00:00
|
|
|
#include <linux/threads.h>
|
2020-06-09 04:32:42 +00:00
|
|
|
#include <linux/pgtable.h>
|
2022-11-14 17:57:44 +00:00
|
|
|
#include <linux/linkage.h>
|
|
|
|
|
2005-09-26 06:04:21 +00:00
|
|
|
#include <asm/processor.h>
|
|
|
|
#include <asm/page.h>
|
|
|
|
#include <asm/mmu.h>
|
|
|
|
#include <asm/cputable.h>
|
|
|
|
#include <asm/thread_info.h>
|
|
|
|
#include <asm/ppc_asm.h>
|
|
|
|
#include <asm/asm-offsets.h>
|
2008-06-18 21:26:52 +00:00
|
|
|
#include <asm/cache.h>
|
2010-11-18 15:06:17 +00:00
|
|
|
#include <asm/ptrace.h>
|
2016-01-14 04:33:46 +00:00
|
|
|
#include <asm/export.h>
|
2018-07-05 16:25:01 +00:00
|
|
|
#include <asm/feature-fixups.h>
|
2005-09-26 06:04:21 +00:00
|
|
|
#include "head_booke.h"
|
|
|
|
|
|
|
|
/* As with the other PowerPC ports, it is expected that when code
|
|
|
|
* execution begins here, the following registers contain valid, yet
|
|
|
|
* optional, information:
|
|
|
|
*
|
|
|
|
* r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
|
|
|
|
* r4 - Starting address of the init RAM disk
|
|
|
|
* r5 - Ending address of the init RAM disk
|
|
|
|
* r6 - Start of kernel command line string (e.g. "mem=128")
|
|
|
|
* r7 - End of kernel command line string
|
|
|
|
*
|
|
|
|
*/
|
2009-04-26 02:11:05 +00:00
|
|
|
__HEAD
_GLOBAL(_stext);
_GLOBAL(_start);
	/*
	 * Reserve a word at a fixed location to store the address
	 * of abatron_pteptrs
	 */
	nop

	/* Translate device tree address to physical, save in r30/r31 */
	bl	get_phys_addr
	mr	r30,r3
	mr	r31,r4

	/* Assume we are running at physical address 0 until proven otherwise */
	li	r25,0			/* phys kernel start (low) */
	li	r24,0			/* CPU number */
	li	r23,0			/* phys kernel start (high) */

#ifdef CONFIG_RELOCATABLE
	LOAD_REG_ADDR_PIC(r3, _stext)	/* Get our current runtime base */

	/* Translate _stext address to physical, save in r23/r25 */
	bl	get_phys_addr
	mr	r23,r3
	mr	r25,r4

	/* bcl 20,31,$+4 is a branch-always that loads LR without polluting
	 * the link stack predictor; gives us a PIC anchor in r8. */
	bcl	20,31,$+4
0:	mflr	r8
	addis	r3,r8,(is_second_reloc - 0b)@ha
	lwz	r19,(is_second_reloc - 0b)@l(r3)

	/* Check if this is the second relocation. */
	cmpwi	r19,1
	bne	1f

	/*
	 * For the second relocation, we already get the real memstart_addr
	 * from device tree. So we will map PAGE_OFFSET to memstart_addr,
	 * then the virtual address of start kernel should be:
	 * PAGE_OFFSET + (kernstart_addr - memstart_addr)
	 * Since the offset between kernstart_addr and memstart_addr should
	 * never be beyond 1G, so we can just use the lower 32bit of them
	 * for the calculation.
	 */
	lis	r3,PAGE_OFFSET@h

	/* NOTE: lwz ...,4(rX) reads the low 32 bits of the 64-bit
	 * kernstart_addr/memstart_addr variables (big-endian layout). */
	addis	r4,r8,(kernstart_addr - 0b)@ha
	addi	r4,r4,(kernstart_addr - 0b)@l
	lwz	r5,4(r4)

	addis	r6,r8,(memstart_addr - 0b)@ha
	addi	r6,r6,(memstart_addr - 0b)@l
	lwz	r7,4(r6)

	subf	r5,r7,r5
	add	r3,r3,r5
	b	2f

1:
	/*
	 * We have the runtime (virtual) address of our base.
	 * We calculate our shift of offset from a 64M page.
	 * We could map the 64M page we belong to at PAGE_OFFSET and
	 * get going from there.
	 */
	lis	r4,KERNELBASE@h
	ori	r4,r4,KERNELBASE@l
	rlwinm	r6,r25,0,0x3ffffff	/* r6 = PHYS_START % 64M */
	rlwinm	r5,r4,0,0x3ffffff	/* r5 = KERNELBASE % 64M */
	subf	r3,r5,r6		/* r3 = r6 - r5 */
	add	r3,r4,r3		/* Required Virtual Address */

2:	bl	relocate

	/*
	 * For the second relocation, we already set the right tlb entries
	 * for the kernel space, so skip the code in 85xx_entry_mapping.S
	 */
	cmpwi	r19,1
	beq	set_ivor
#endif
|
|
|
|
|
2005-09-26 06:04:21 +00:00
|
|
|
/* We try to not make any assumptions about how the boot loader
 * setup or used the TLBs.  We invalidate all mappings from the
 * boot loader and load a single entry in TLB1[0] to map the
 * first 64M of kernel memory.  Any boot info passed from the
 * bootloader needs to live in this first 64M.
 *
 * Requirement on bootloader:
 *  - The page we're executing in needs to reside in TLB1 and
 *    have IPROT=1. If not an invalidate broadcast could
 *    evict the entry we're currently executing in.
 *
 *  r3 = Index of TLB1 were executing in
 *  r4 = Current MSR[IS]
 *  r5 = Index of TLB1 temp mapping
 *
 * Later in mapin_ram we will correctly map lowmem, and resize TLB1[0]
 * if needed
 */

_GLOBAL(__early_start)
	/* r20 = virtual base the kernel was linked/relocated to run at;
	 * consumed by the included entry-mapping code below. */
	LOAD_REG_ADDR_PIC(r20, kernstart_virt_addr)
	lwz	r20,0(r20)

#define ENTRY_MAPPING_BOOT_SETUP
#include "85xx_entry_mapping.S"
#undef ENTRY_MAPPING_BOOT_SETUP
|
2005-09-26 06:04:21 +00:00
|
|
|
|
2013-12-24 07:12:10 +00:00
|
|
|
set_ivor:
	/* Establish the interrupt vector offsets */
	SET_IVOR(0,  CriticalInput);
	SET_IVOR(1,  MachineCheck);
	SET_IVOR(2,  DataStorage);
	SET_IVOR(3,  InstructionStorage);
	SET_IVOR(4,  ExternalInput);
	SET_IVOR(5,  Alignment);
	SET_IVOR(6,  Program);
	SET_IVOR(7,  FloatingPointUnavailable);
	SET_IVOR(8,  SystemCall);
	SET_IVOR(9,  AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError);
	SET_IVOR(14, InstructionTLBError);
	SET_IVOR(15, DebugCrit);

	/* Establish the interrupt vector base */
	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
	mtspr	SPRN_IVPR,r4

	/* Setup the defaults for TLB entries: 4K pages for hardware
	 * TLB miss replacement (MAS4 seeds MAS1/MAS2 on a miss). */
	li	r2,(MAS4_TSIZED(BOOK3E_PAGESZ_4K))@l
	mtspr	SPRN_MAS4, r2

#if !defined(CONFIG_BDI_SWITCH)
	/*
	 * The Abatron BDI JTAG debugger does not tolerate others
	 * mucking with the debug registers.
	 */
	lis	r2,DBCR0_IDM@h
	mtspr	SPRN_DBCR0,r2
	isync
	/* clear any residual debug events */
	li	r2,-1
	mtspr	SPRN_DBSR,r2
#endif
|
|
|
|
|
2008-11-19 15:35:56 +00:00
|
|
|
#ifdef CONFIG_SMP
	/* Check to see if we're the second processor, and jump
	 * to the secondary_start code if so
	 */
	LOAD_REG_ADDR_PIC(r24, boot_cpuid)
	lwz	r24, 0(r24)
	cmpwi	r24, -1		/* boot_cpuid == -1: boot CPU not chosen yet */
	mfspr	r24,SPRN_PIR	/* r24 = our hardware CPU number either way */
	bne	__secondary_start
#endif
|
|
|
|
|
2005-09-26 06:04:21 +00:00
|
|
|
	/*
	 * This is where the main kernel code starts.
	 */

	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to current thread */
	addi	r4,r2,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4

	/* stack: top of init_thread_union, with an initial back-chain of 0 */
	lis	r1,init_thread_union@h
	ori	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_MIN_SIZE(r1)

#ifdef CONFIG_SMP
	stw	r24, TASK_CPU(r2)	/* record our CPU number in init_task */
#endif

	bl	early_init

#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
#ifdef CONFIG_RELOCATABLE
	/* r30/r31 = device tree phys addr, r23/r25 = kernel phys start
	 * (both captured at entry above) */
	mr	r3,r30
	mr	r4,r31
#ifdef CONFIG_PHYS_64BIT
	mr	r5,r23
	mr	r6,r25
#else
	mr	r5,r25
#endif
	bl	relocate_init
#endif

#ifdef CONFIG_DYNAMIC_MEMSTART
	lis	r3,kernstart_addr@ha
	la	r3,kernstart_addr@l(r3)
#ifdef CONFIG_PHYS_64BIT
	stw	r23,0(r3)	/* high word of 64-bit phys start */
	stw	r25,4(r3)	/* low word */
#else
	stw	r25,0(r3)
#endif
#endif

	/*
	 * Decide what sort of machine this is and initialize the MMU.
	 */
	mr	r3,r30		/* device tree physical address */
	mr	r4,r31
	bl	machine_init
	bl	MMU_init

	/* Setup PTE pointers for the Abatron bdiGDB */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	lis	r3, kernstart_virt_addr@ha
	lwz	r4, kernstart_virt_addr@l(r3)
	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
	stw	r6, 0(r5)

	/* Let's move on: enter start_kernel with MSR_KERNEL via rfi */
	lis	r4,start_kernel@h
	ori	r4,r4,start_kernel@l
	lis	r3,MSR_KERNEL@h
	ori	r3,r3,MSR_KERNEL@l
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi			/* change context and jump to start_kernel */
|
|
|
|
|
|
|
|
/* Macros to hide the PTE size differences
 *
 * FIND_PTE -- walks the page tables given EA & pgdir pointer
 *   r10 -- EA of fault
 *   r11 -- PGDIR pointer
 *   r12 -- free
 *   label 2: is the bailout case
 *
 * if we find the pte (fall through):
 *   r11 is low pte word
 *   r12 is pointer to the pte
 *   r10 is the pshift from the PGD, if we're a hugepage
 */
#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_HUGETLB_PAGE
#define FIND_PTE	\
	rlwinm	r12, r10, 13, 19, 29;	/* Compute pgdir/pmd offset */	\
	lwzx	r11, r12, r11;		/* Get pgd/pmd entry */		\
	rlwinm.	r12, r11, 0, 0, 20;	/* Extract pt base address */	\
	blt	1000f;			/* Normal non-huge page */	\
	beq	2f;			/* Bail if no table */		\
	oris	r11, r11, PD_HUGE@h;	/* Put back address bit */	\
	andi.	r10, r11, HUGEPD_SHIFT_MASK@l; /* extract size field */	\
	xor	r12, r10, r11;		/* drop size bits from pointer */ \
	b	1001f;							\
1000:	rlwimi	r12, r10, 23, 20, 28;	/* Compute pte address */	\
	li	r10, 0;			/* clear r10 */			\
1001:	lwz	r11, 4(r12);		/* Get pte entry */
#else
#define FIND_PTE	\
	rlwinm	r12, r10, 13, 19, 29;	/* Compute pgdir/pmd offset */	\
	lwzx	r11, r12, r11;		/* Get pgd/pmd entry */		\
	rlwinm.	r12, r11, 0, 0, 20;	/* Extract pt base address */	\
	beq	2f;			/* Bail if no table */		\
	rlwimi	r12, r10, 23, 20, 28;	/* Compute pte address */	\
	lwz	r11, 4(r12);		/* Get pte entry */
#endif /* HUGEPAGE */
#else /* !PTE_64BIT */
#define FIND_PTE	\
	rlwimi	r11, r10, 12, 20, 29;	/* Create L1 (pgdir/pmd) address */	\
	lwz	r11, 0(r11);		/* Get L1 entry */			\
	rlwinm.	r12, r11, 0, 0, 19;	/* Extract L2 (pte) base address */	\
	beq	2f;			/* Bail if no table */			\
	rlwimi	r12, r10, 22, 20, 29;	/* Compute PTE address */		\
	lwz	r11, 0(r12);		/* Get Linux PTE */
#endif
|
|
|
|
|
|
|
|
/*
 * Interrupt vector entry code
 *
 * The Book E MMUs are always on so we don't need to handle
 * interrupts in real mode as with previous PPC processors. In
 * this case we handle interrupts in the kernel virtual address
 * space.
 *
 * Interrupt vectors are dynamically placed relative to the
 * interrupt prefix as determined by the address of interrupt_base.
 * The interrupt vectors offsets are programmed using the labels
 * for each interrupt vector entry.
 *
 * Interrupt vectors must be aligned on a 16 byte boundary.
 * We align on a 32 byte cache line boundary for good measure.
 */

interrupt_base:
	/* Critical Input Interrupt */
	CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception)

	/* Machine Check Interrupt */
	MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)

	/* Data Storage Interrupt */
	START_EXCEPTION(DataStorage)
	NORMAL_EXCEPTION_PROLOG(0x300, DATA_STORAGE)
	mfspr	r5,SPRN_ESR		/* Grab the ESR, save it */
	stw	r5,_ESR(r11)
	mfspr	r4,SPRN_DEAR		/* Grab the DEAR, save it */
	stw	r4, _DEAR(r11)
	/* ESR[ILK]/ESR[DLK] set => cache-locking fault, not a page fault */
	andis.	r10,r5,(ESR_ILK|ESR_DLK)@h
	bne	1f
	prepare_transfer_to_handler
	bl	do_page_fault
	b	interrupt_return
1:
	prepare_transfer_to_handler
	bl	CacheLockingException
	b	interrupt_return

	/* Instruction Storage Interrupt */
	INSTRUCTION_STORAGE_EXCEPTION

	/* External Input Interrupt */
	EXCEPTION(0x0500, EXTERNAL, ExternalInput, do_IRQ)

	/* Alignment Interrupt */
	ALIGNMENT_EXCEPTION

	/* Program Interrupt */
	PROGRAM_EXCEPTION

	/* Floating Point Unavailable Interrupt */
#ifdef CONFIG_PPC_FPU
	FP_UNAVAILABLE_EXCEPTION
#else
	EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, unknown_exception)
#endif

	/* System Call Interrupt */
	START_EXCEPTION(SystemCall)
	SYSCALL_ENTRY   0xc00 BOOKE_INTERRUPT_SYSCALL SPRN_SRR1

	/* Auxiliary Processor Unavailable Interrupt */
	EXCEPTION(0x2900, AP_UNAVAIL, AuxillaryProcessorUnavailable, unknown_exception)

	/* Decrementer Interrupt */
	DECREMENTER_EXCEPTION

	/* Fixed Internal Timer Interrupt */
	/* TODO: Add FIT support */
	EXCEPTION(0x3100, FIT, FixedIntervalTimer, unknown_exception)

	/* Watchdog Timer Interrupt */
#ifdef CONFIG_BOOKE_WDT
	CRITICAL_EXCEPTION(0x3200, WATCHDOG, WatchdogTimer, WatchdogException)
#else
	CRITICAL_EXCEPTION(0x3200, WATCHDOG, WatchdogTimer, unknown_exception)
#endif
|
|
|
|
|
|
|
|
	/* Data TLB Error Interrupt */
	START_EXCEPTION(DataTLBError)
	mtspr	SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
	mfspr	r10, SPRN_SPRG_THREAD
	stw	r11, THREAD_NORMSAVE(0)(r10)
#ifdef CONFIG_KVM_BOOKE_HV
BEGIN_FTR_SECTION
	mfspr	r11, SPRN_SRR1
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
#endif
	stw	r12, THREAD_NORMSAVE(1)(r10)
	stw	r13, THREAD_NORMSAVE(2)(r10)
	mfcr	r13
	stw	r13, THREAD_NORMSAVE(3)(r10)
	DO_KVM	BOOKE_INTERRUPT_DTLB_MISS SPRN_SRR1
START_BTB_FLUSH_SECTION
	/* Spectre mitigation: flush branch target buffer when the
	 * miss came from user mode (MSR_PR set). */
	mfspr	r11, SPRN_SRR1
	andi.	r10,r11,MSR_PR
	beq	1f
	BTB_FLUSH(r10)
1:
END_BTB_FLUSH_SECTION
	mfspr	r10, SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	5, r10, r11
	blt	5, 3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MAS1		/* Set TID to 0 */
	rlwinm	r12,r12,0,16,1
	mtspr	SPRN_MAS1,r12

	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)

#ifdef CONFIG_PPC_KUAP
	/* MAS1 TID == 0 means a user access under KUAP lockout */
	mfspr	r12, SPRN_MAS1
	rlwinm.	r12,r12,0,0x3fff0000
	beq	2f			/* KUAP fault */
#endif

4:
	/* Mask of required permission bits. Note that while we
	 * do copy ESR:ST to _PAGE_RW position as trying to write
	 * to an RO page is pretty common, we don't do it with
	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
	 * event so I'd rather take the overhead when it happens
	 * rather than adding an instruction here. We should measure
	 * whether the whole thing is worth it in the first place
	 * as we could avoid loading SPRN_ESR completely in the first
	 * place...
	 *
	 * TODO: Is it worth doing that mfspr & rlwimi in the first
	 * place or can we save a couple of instructions here ?
	 */
	mfspr	r12,SPRN_ESR
#ifdef CONFIG_PTE_64BIT
	li	r13,_PAGE_PRESENT
	oris	r13,r13,_PAGE_ACCESSED@h
#else
	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
#endif
	rlwimi	r13,r12,11,29,29	/* copy ESR:ST into the _PAGE_RW slot */

	FIND_PTE
	andc.	r13,r13,r11		/* Check permission */

#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r13,r11,r12		/* create false data dep */
	lwzx	r13,r11,r13		/* Get upper pte bits */
#else
	lwz	r13,0(r12)		/* Get upper pte bits */
#endif
#endif

	bne	2f			/* Bail if permission/valid mismatch */

	/* Jump to common tlb load */
	b	finish_tlb_load
2:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r10, SPRN_SPRG_THREAD
	lwz	r11, THREAD_NORMSAVE(3)(r10)
	mtcr	r11
	lwz	r13, THREAD_NORMSAVE(2)(r10)
	lwz	r12, THREAD_NORMSAVE(1)(r10)
	lwz	r11, THREAD_NORMSAVE(0)(r10)
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	DataStorage
|
2005-09-26 06:04:21 +00:00
|
|
|
|
|
|
|
	/* Instruction TLB Error Interrupt */
	/*
	 * Nearly the same as above, except we get our
	 * information from different registers and bailout
	 * to a different point.
	 */
	START_EXCEPTION(InstructionTLBError)
	mtspr	SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
	mfspr	r10, SPRN_SPRG_THREAD
	stw	r11, THREAD_NORMSAVE(0)(r10)
#ifdef CONFIG_KVM_BOOKE_HV
BEGIN_FTR_SECTION
	mfspr	r11, SPRN_SRR1
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
#endif
	stw	r12, THREAD_NORMSAVE(1)(r10)
	stw	r13, THREAD_NORMSAVE(2)(r10)
	mfcr	r13
	stw	r13, THREAD_NORMSAVE(3)(r10)
	DO_KVM	BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR1
START_BTB_FLUSH_SECTION
	/* Spectre mitigation: flush BTB for user-mode misses */
	mfspr	r11, SPRN_SRR1
	andi.	r10,r11,MSR_PR
	beq	1f
	BTB_FLUSH(r10)
1:
END_BTB_FLUSH_SECTION

	mfspr	r10, SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	5, r10, r11
	blt	5, 3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MAS1		/* Set TID to 0 */
	rlwinm	r12,r12,0,16,1
	mtspr	SPRN_MAS1,r12

	/* Make up the required permissions for kernel code */
#ifdef CONFIG_PTE_64BIT
	li	r13,_PAGE_PRESENT | _PAGE_BAP_SX
	oris	r13,r13,_PAGE_ACCESSED@h
#else
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
#endif
	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)

#ifdef CONFIG_PPC_KUAP
	/* MAS1 TID == 0 means a user access under KUAP lockout */
	mfspr	r12, SPRN_MAS1
	rlwinm.	r12,r12,0,0x3fff0000
	beq	2f			/* KUAP fault */
#endif

	/* Make up the required permissions for user code */
#ifdef CONFIG_PTE_64BIT
	li	r13,_PAGE_PRESENT | _PAGE_BAP_UX
	oris	r13,r13,_PAGE_ACCESSED@h
#else
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
#endif

4:
	FIND_PTE
	andc.	r13,r13,r11		/* Check permission */

#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r13,r11,r12		/* create false data dep */
	lwzx	r13,r11,r13		/* Get upper pte bits */
#else
	lwz	r13,0(r12)		/* Get upper pte bits */
#endif
#endif

	bne	2f			/* Bail if permission mismatch */

	/* Jump to common TLB load point */
	b	finish_tlb_load

2:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r10, SPRN_SPRG_THREAD
	lwz	r11, THREAD_NORMSAVE(3)(r10)
	mtcr	r11
	lwz	r13, THREAD_NORMSAVE(2)(r10)
	lwz	r12, THREAD_NORMSAVE(1)(r10)
	lwz	r11, THREAD_NORMSAVE(0)(r10)
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	InstructionStorage
|
|
|
|
|
2020-11-17 05:07:58 +00:00
|
|
|
/* Define SPE handlers for e500v2 */
#ifdef CONFIG_SPE
	/* SPE Unavailable */
	START_EXCEPTION(SPEUnavailable)
	NORMAL_EXCEPTION_PROLOG(0x2010, SPE_UNAVAIL)
	/* NOTE: the CR state tested here is produced inside
	 * NORMAL_EXCEPTION_PROLOG (user vs. kernel fault). */
	beq	1f
	bl	load_up_spe
	b	fast_exception_return
1:	prepare_transfer_to_handler
	bl	KernelSPE
	b	interrupt_return
#elif defined(CONFIG_SPE_POSSIBLE)
	EXCEPTION(0x2020, SPE_UNAVAIL, SPEUnavailable, unknown_exception)
#endif /* CONFIG_SPE_POSSIBLE */

	/* SPE Floating Point Data */
#ifdef CONFIG_SPE
	START_EXCEPTION(SPEFloatingPointData)
	NORMAL_EXCEPTION_PROLOG(0x2030, SPE_FP_DATA)
	prepare_transfer_to_handler
	bl	SPEFloatingPointException
	REST_NVGPRS(r1)
	b	interrupt_return

	/* SPE Floating Point Round */
	START_EXCEPTION(SPEFloatingPointRound)
	NORMAL_EXCEPTION_PROLOG(0x2050, SPE_FP_ROUND)
	prepare_transfer_to_handler
	bl	SPEFloatingPointRoundException
	REST_NVGPRS(r1)
	b	interrupt_return
#elif defined(CONFIG_SPE_POSSIBLE)
	EXCEPTION(0x2040, SPE_FP_DATA, SPEFloatingPointData, unknown_exception)
	EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, unknown_exception)
#endif /* CONFIG_SPE_POSSIBLE */
|
|
|
|
|
2005-09-26 06:04:21 +00:00
|
|
|
|
|
|
|
	/* Performance Monitor */
	EXCEPTION(0x2060, PERFORMANCE_MONITOR, PerformanceMonitor, \
		  performance_monitor_exception)

	EXCEPTION(0x2070, DOORBELL, Doorbell, doorbell_exception)

	CRITICAL_EXCEPTION(0x2080, DOORBELL_CRITICAL, \
			   CriticalDoorbell, unknown_exception)

	/* Debug Interrupt */
	DEBUG_DEBUG_EXCEPTION
	DEBUG_CRIT_EXCEPTION

	GUEST_DOORBELL_EXCEPTION

	CRITICAL_EXCEPTION(0, GUEST_DBELL_CRIT, CriticalGuestDoorbell, \
			   unknown_exception)

	/* Hypercall */
	EXCEPTION(0, HV_SYSCALL, Hypercall, unknown_exception)

	/* Embedded Hypervisor Privilege */
	EXCEPTION(0, HV_PRIV, Ehvpriv, unknown_exception)

/* End of fixed interrupt vectors; used to size the vector region */
interrupt_end:
|
|
|
|
|
2005-09-26 06:04:21 +00:00
|
|
|
/*
|
|
|
|
* Local functions
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Both the instruction and data TLB miss get to this
|
|
|
|
* point to load the TLB.
|
2011-06-28 09:54:48 +00:00
|
|
|
* r10 - tsize encoding (if HUGETLB_PAGE) or available to use
|
2007-09-27 13:43:35 +00:00
|
|
|
* r11 - TLB (info from Linux PTE)
|
2008-07-09 15:03:28 +00:00
|
|
|
* r12 - available to use
|
|
|
|
* r13 - upper bits of PTE (if PTE_64BIT) or available to use
|
2007-10-11 18:36:52 +00:00
|
|
|
* CR5 - results of addr >= PAGE_OFFSET
|
2005-09-26 06:04:21 +00:00
|
|
|
* MAS0, MAS1 - loaded with proper value when we get here
|
|
|
|
* MAS2, MAS3 - will need additional info from Linux PTE
|
|
|
|
* Upon exit, we reload everything and RFI.
|
|
|
|
*/
|
|
|
|
finish_tlb_load:
|
2011-06-28 09:54:48 +00:00
|
|
|
#ifdef CONFIG_HUGETLB_PAGE
|
|
|
|
cmpwi 6, r10, 0 /* check for huge page */
|
|
|
|
beq 6, finish_tlb_load_cont /* !huge */
|
|
|
|
|
|
|
|
/* Alas, we need more scratch registers for hugepages */
|
|
|
|
mfspr r12, SPRN_SPRG_THREAD
|
|
|
|
stw r14, THREAD_NORMSAVE(4)(r12)
|
|
|
|
stw r15, THREAD_NORMSAVE(5)(r12)
|
|
|
|
stw r16, THREAD_NORMSAVE(6)(r12)
|
|
|
|
stw r17, THREAD_NORMSAVE(7)(r12)
|
|
|
|
|
|
|
|
/* Get the next_tlbcam_idx percpu var */
|
|
|
|
#ifdef CONFIG_SMP
|
2019-01-31 10:09:04 +00:00
|
|
|
lwz r15, TASK_CPU-THREAD(r12)
|
2011-06-28 09:54:48 +00:00
|
|
|
lis r14, __per_cpu_offset@h
|
|
|
|
ori r14, r14, __per_cpu_offset@l
|
|
|
|
rlwinm r15, r15, 2, 0, 29
|
|
|
|
lwzx r16, r14, r15
|
|
|
|
#else
|
|
|
|
li r16, 0
|
|
|
|
#endif
|
|
|
|
lis r17, next_tlbcam_idx@h
|
|
|
|
ori r17, r17, next_tlbcam_idx@l
|
|
|
|
add r17, r17, r16 /* r17 = *next_tlbcam_idx */
|
|
|
|
lwz r15, 0(r17) /* r15 = next_tlbcam_idx */
|
|
|
|
|
|
|
|
lis r14, MAS0_TLBSEL(1)@h /* select TLB1 (TLBCAM) */
|
|
|
|
rlwimi r14, r15, 16, 4, 15 /* next_tlbcam_idx entry */
|
|
|
|
mtspr SPRN_MAS0, r14
|
|
|
|
|
|
|
|
/* Extract TLB1CFG(NENTRY) */
|
|
|
|
mfspr r16, SPRN_TLB1CFG
|
|
|
|
andi. r16, r16, 0xfff
|
|
|
|
|
|
|
|
/* Update next_tlbcam_idx, wrapping when necessary */
|
|
|
|
addi r15, r15, 1
|
|
|
|
cmpw r15, r16
|
|
|
|
blt 100f
|
|
|
|
lis r14, tlbcam_index@h
|
|
|
|
ori r14, r14, tlbcam_index@l
|
|
|
|
lwz r15, 0(r14)
|
|
|
|
100: stw r15, 0(r17)
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Calc MAS1_TSIZE from r10 (which has pshift encoded)
|
|
|
|
* tlb_enc = (pshift - 10).
|
|
|
|
*/
|
|
|
|
subi r15, r10, 10
|
|
|
|
mfspr r16, SPRN_MAS1
|
|
|
|
rlwimi r16, r15, 7, 20, 24
|
|
|
|
mtspr SPRN_MAS1, r16
|
|
|
|
|
|
|
|
/* copy the pshift for use later */
|
|
|
|
mr r14, r10
|
|
|
|
|
|
|
|
/* fall through */
|
|
|
|
|
|
|
|
#endif /* CONFIG_HUGETLB_PAGE */
|
|
|
|
|
2005-09-26 06:04:21 +00:00
|
|
|
/*
|
|
|
|
* We set execute, because we don't have the granularity to
|
|
|
|
* properly set this at the page level (Linux problem).
|
|
|
|
* Many of these bits are software only. Bits we don't set
|
|
|
|
* here we (properly should) assume have the appropriate value.
|
|
|
|
*/
|
2011-06-28 09:54:48 +00:00
|
|
|
finish_tlb_load_cont:
|
2009-09-01 15:48:42 +00:00
|
|
|
#ifdef CONFIG_PTE_64BIT
|
|
|
|
rlwinm r12, r11, 32-2, 26, 31 /* Move in perm bits */
|
|
|
|
andi. r10, r11, _PAGE_DIRTY
|
|
|
|
bne 1f
|
|
|
|
li r10, MAS3_SW | MAS3_UW
|
|
|
|
andc r12, r12, r10
|
|
|
|
1: rlwimi r12, r13, 20, 0, 11 /* grab RPN[32:43] */
|
|
|
|
rlwimi r12, r11, 20, 12, 19 /* grab RPN[44:51] */
|
2011-06-28 09:54:48 +00:00
|
|
|
2: mtspr SPRN_MAS3, r12
|
2009-09-01 15:48:42 +00:00
|
|
|
BEGIN_MMU_FTR_SECTION
|
|
|
|
srwi r10, r13, 12 /* grab RPN[12:31] */
|
|
|
|
mtspr SPRN_MAS7, r10
|
|
|
|
END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
|
|
|
|
#else
|
2009-08-18 19:00:34 +00:00
|
|
|
li r10, (_PAGE_EXEC | _PAGE_PRESENT)
|
2011-06-28 09:54:48 +00:00
|
|
|
mr r13, r11
|
2008-07-09 15:03:28 +00:00
|
|
|
rlwimi r10, r11, 31, 29, 29 /* extract _PAGE_DIRTY into SW */
|
|
|
|
and r12, r11, r10
|
2005-09-26 06:04:21 +00:00
|
|
|
andi. r10, r11, _PAGE_USER /* Test for _PAGE_USER */
|
2008-07-09 15:03:28 +00:00
|
|
|
slwi r10, r12, 1
|
|
|
|
or r10, r10, r12
|
2021-10-19 07:29:15 +00:00
|
|
|
rlwinm r10, r10, 0, ~_PAGE_EXEC /* Clear SX on user pages */
|
2008-07-09 15:03:28 +00:00
|
|
|
iseleq r12, r12, r10
|
2011-06-28 09:54:48 +00:00
|
|
|
rlwimi r13, r12, 0, 20, 31 /* Get RPN from PTE, merge w/ perms */
|
|
|
|
mtspr SPRN_MAS3, r13
|
2005-09-26 06:04:21 +00:00
|
|
|
#endif
|
2011-06-28 09:54:48 +00:00
|
|
|
|
|
|
|
mfspr r12, SPRN_MAS2
|
|
|
|
#ifdef CONFIG_PTE_64BIT
|
|
|
|
rlwimi r12, r11, 32-19, 27, 31 /* extract WIMGE from pte */
|
|
|
|
#else
|
|
|
|
rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */
|
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_HUGETLB_PAGE
|
|
|
|
beq 6, 3f /* don't mask if page isn't huge */
|
|
|
|
li r13, 1
|
|
|
|
slw r13, r13, r14
|
|
|
|
subi r13, r13, 1
|
|
|
|
rlwinm r13, r13, 0, 0, 19 /* bottom bits used for WIMGE/etc */
|
|
|
|
andc r12, r12, r13 /* mask off ea bits within the page */
|
|
|
|
#endif
|
|
|
|
3: mtspr SPRN_MAS2, r12
|
|
|
|
|
|
|
|
tlb_write_entry:
|
2005-09-26 06:04:21 +00:00
|
|
|
tlbwe
|
|
|
|
|
|
|
|
/* Done...restore registers and get out of here. */
|
2011-04-22 21:48:27 +00:00
|
|
|
mfspr r10, SPRN_SPRG_THREAD
|
2011-06-28 09:54:48 +00:00
|
|
|
#ifdef CONFIG_HUGETLB_PAGE
|
|
|
|
beq 6, 8f /* skip restore for 4k page faults */
|
|
|
|
lwz r14, THREAD_NORMSAVE(4)(r10)
|
|
|
|
lwz r15, THREAD_NORMSAVE(5)(r10)
|
|
|
|
lwz r16, THREAD_NORMSAVE(6)(r10)
|
|
|
|
lwz r17, THREAD_NORMSAVE(7)(r10)
|
|
|
|
#endif
|
|
|
|
8: lwz r11, THREAD_NORMSAVE(3)(r10)
|
2005-09-26 06:04:21 +00:00
|
|
|
mtcr r11
|
2011-04-22 21:48:27 +00:00
|
|
|
lwz r13, THREAD_NORMSAVE(2)(r10)
|
|
|
|
lwz r12, THREAD_NORMSAVE(1)(r10)
|
|
|
|
lwz r11, THREAD_NORMSAVE(0)(r10)
|
2009-07-14 20:52:54 +00:00
|
|
|
mfspr r10, SPRN_SPRG_RSCRATCH0
|
2005-09-26 06:04:21 +00:00
|
|
|
rfi /* Force context change */
|
|
|
|
|
|
|
|
#ifdef CONFIG_SPE
|
|
|
|
/* Note that the SPE support is closely modeled after the AltiVec
|
|
|
|
* support. Changes to one are likely to be applicable to the
|
|
|
|
* other! */
|
2012-03-01 01:20:19 +00:00
|
|
|
_GLOBAL(load_up_spe)
/*
 * Claim the SPE unit for the current task.
 * Reached from the SPE Unavailable exception path; r9 presumably holds the
 * saved MSR that the exception-return path will restore -- TODO confirm
 * against the caller above this window.  r5 is clobbered (THREAD pointer).
 */
|
2005-09-26 06:04:21 +00:00
|
|
|
/*
 * Disable SPE for the task which had SPE previously,
|
|
|
|
 * and save its SPE registers in its thread_struct.
|
|
|
|
 * Enables SPE for use in the kernel on return.
|
|
|
|
 * On SMP we know the SPE units are free, since we give it up every
|
|
|
|
 * switch.  -- Kumar
|
|
|
|
 */
|
|
|
|
	mfmsr	r5
|
|
|
|
	oris	r5,r5,MSR_SPE@h
|
|
|
|
	mtmsr	r5			/* enable use of SPE now */
|
|
|
|
	isync
|
|
|
|
	/* enable use of SPE after return */
|
|
|
|
	oris	r9,r9,MSR_SPE@h
|
2009-07-14 20:52:54 +00:00
|
|
|
	mfspr	r5,SPRN_SPRG_THREAD	/* current task's THREAD (phys) */
|
2005-09-26 06:04:21 +00:00
|
|
|
	li	r4,1
|
|
|
|
	li	r10,THREAD_ACC
|
|
|
|
	stw	r4,THREAD_USED_SPE(r5)	/* mark that this task has used SPE */
|
|
|
|
	evlddx	evr4,r10,r5		/* load saved accumulator image */
|
|
|
|
	evmra	evr4,evr4		/* move it into the ACC register */
|
2011-06-14 23:34:27 +00:00
|
|
|
	REST_32EVRS(0,r10,r5,THREAD_EVR0)	/* restore the 32 EVR save slots */
|
2012-03-01 01:20:19 +00:00
|
|
|
	blr
|
2005-09-26 06:04:21 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* SPE unavailable trap from kernel - print a message, but let
|
|
|
|
* the task use SPE in the kernel until it returns to user mode.
|
|
|
|
*/
|
2023-01-28 12:41:38 +00:00
|
|
|
SYM_FUNC_START_LOCAL(KernelSPE)
|
2005-09-26 06:04:21 +00:00
|
|
|
lwz r3,_MSR(r1)
|
|
|
|
oris r3,r3,MSR_SPE@h
|
|
|
|
stw r3,_MSR(r1) /* enable use of SPE after return */
|
2010-03-06 22:43:55 +00:00
|
|
|
#ifdef CONFIG_PRINTK
|
2005-09-26 06:04:21 +00:00
|
|
|
lis r3,87f@h
|
|
|
|
ori r3,r3,87f@l
|
|
|
|
mr r4,r2 /* current */
|
|
|
|
lwz r5,_NIP(r1)
|
printk: Userspace format indexing support
We have a number of systems industry-wide that have a subset of their
functionality that works as follows:
1. Receive a message from local kmsg, serial console, or netconsole;
2. Apply a set of rules to classify the message;
3. Do something based on this classification (like scheduling a
remediation for the machine), rinse, and repeat.
As a couple of examples of places we have this implemented just inside
Facebook, although this isn't a Facebook-specific problem, we have this
inside our netconsole processing (for alarm classification), and as part
of our machine health checking. We use these messages to determine
fairly important metrics around production health, and it's important
that we get them right.
While for some kinds of issues we have counters, tracepoints, or metrics
with a stable interface which can reliably indicate the issue, in order
to react to production issues quickly we need to work with the interface
which most kernel developers naturally use when developing: printk.
Most production issues come from unexpected phenomena, and as such
usually the code in question doesn't have easily usable tracepoints or
other counters available for the specific problem being mitigated. We
have a number of lines of monitoring defence against problems in
production (host metrics, process metrics, service metrics, etc), and
where it's not feasible to reliably monitor at another level, this kind
of pragmatic netconsole monitoring is essential.
As one would expect, monitoring using printk is rather brittle for a
number of reasons -- most notably that the message might disappear
entirely in a new version of the kernel, or that the message may change
in some way that the regex or other classification methods start to
silently fail.
One factor that makes this even harder is that, under normal operation,
many of these messages are never expected to be hit. For example, there
may be a rare hardware bug which one wants to detect if it was to ever
happen again, but its recurrence is not likely or anticipated. This
precludes using something like checking whether the printk in question
was printed somewhere fleetwide recently to determine whether the
message in question is still present or not, since we don't anticipate
that it should be printed anywhere, but still need to monitor for its
future presence in the long-term.
This class of issue has happened on a number of occasions, causing
unhealthy machines with hardware issues to remain in production for
longer than ideal. As a recent example, some monitoring around
blk_update_request fell out of date and caused semi-broken machines to
remain in production for longer than would be desirable.
Searching through the codebase to find the message is also extremely
fragile, because many of the messages are further constructed beyond
their callsite (eg. btrfs_printk and other module-specific wrappers,
each with their own functionality). Even if they aren't, guessing the
format and formulation of the underlying message based on the aesthetics
of the message emitted is not a recipe for success at scale, and our
previous issues with fleetwide machine health checking demonstrate as
much.
This provides a solution to the issue of silently changed or deleted
printks: we record pointers to all printk format strings known at
compile time into a new .printk_index section, both in vmlinux and
modules. At runtime, this can then be iterated by looking at
<debugfs>/printk/index/<module>, which emits the following format, both
readable by humans and able to be parsed by machines:
$ head -1 vmlinux; shuf -n 5 vmlinux
# <level[,flags]> filename:line function "format"
<5> block/blk-settings.c:661 disk_stack_limits "%s: Warning: Device %s is misaligned\n"
<4> kernel/trace/trace.c:8296 trace_create_file "Could not create tracefs '%s' entry\n"
<6> arch/x86/kernel/hpet.c:144 _hpet_print_config "hpet: %s(%d):\n"
<6> init/do_mounts.c:605 prepare_namespace "Waiting for root device %s...\n"
<6> drivers/acpi/osl.c:1410 acpi_no_auto_serialize_setup "ACPI: auto-serialization disabled\n"
This mitigates the majority of cases where we have a highly-specific
printk which we want to match on, as we can now enumerate and check
whether the format changed or the printk callsite disappeared entirely
in userspace. This allows us to catch changes to printks we monitor
earlier and decide what to do about it before it becomes problematic.
There is no additional runtime cost for printk callers or printk itself,
and the assembly generated is exactly the same.
Signed-off-by: Chris Down <chris@chrisdown.name>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Jessica Yu <jeyu@kernel.org>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: John Ogness <john.ogness@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kees Cook <keescook@chromium.org>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Tested-by: Petr Mladek <pmladek@suse.com>
Reported-by: kernel test robot <lkp@intel.com>
Acked-by: Andy Shevchenko <andy.shevchenko@gmail.com>
Acked-by: Jessica Yu <jeyu@kernel.org> # for module.{c,h}
Signed-off-by: Petr Mladek <pmladek@suse.com>
Link: https://lore.kernel.org/r/e42070983637ac5e384f17fbdbe86d19c7b212a5.1623775748.git.chris@chrisdown.name
2021-06-15 16:52:53 +00:00
|
|
|
bl _printk
|
2010-03-06 22:43:55 +00:00
|
|
|
#endif
|
2021-03-12 12:50:41 +00:00
|
|
|
b interrupt_return
|
2010-03-06 22:43:55 +00:00
|
|
|
#ifdef CONFIG_PRINTK
|
2005-09-26 06:04:21 +00:00
|
|
|
87: .string "SPE used in kernel (task=%p, pc=%x) \n"
|
2010-03-06 22:43:55 +00:00
|
|
|
#endif
|
2005-09-26 06:04:21 +00:00
|
|
|
.align 4,0
|
|
|
|
|
2023-01-28 12:41:38 +00:00
|
|
|
SYM_FUNC_END(KernelSPE)
|
2005-09-26 06:04:21 +00:00
|
|
|
#endif /* CONFIG_SPE */
|
|
|
|
|
2013-12-24 07:12:04 +00:00
|
|
|
/*
|
|
|
|
* Translate the effec addr in r3 to phys addr. The phys addr will be put
|
|
|
|
* into r3(higher 32bit) and r4(lower 32bit)
|
|
|
|
*/
|
2022-11-14 17:57:44 +00:00
|
|
|
SYM_FUNC_START_LOCAL(get_phys_addr)
/*
 * Probe the TLB for the effective address in r3 (current PID and current
 * MSR[DS] address space) and return the physical address: high 32 bits in
 * r3 (only when CONFIG_PHYS_64BIT), low 32 bits in r4.
 * The mapping must already exist -- tlbsx has no miss handling here.
 * Clobbers r8-r12.
 */
|
2013-12-24 07:12:04 +00:00
|
|
|
	mfmsr	r8
|
|
|
|
	mfspr	r9,SPRN_PID
|
|
|
|
	rlwinm	r9,r9,16,0x3fff0000	/* turn PID into MAS6[SPID] */
|
|
|
|
	rlwimi	r9,r8,28,0x00000001	/* turn MSR[DS] into MAS6[SAS] */
|
|
|
|
	mtspr	SPRN_MAS6,r9
|
|
|
|
|
|
|
|
	tlbsx	0,r3			/* must succeed */
|
|
|
|
|
|
|
|
	mfspr	r8,SPRN_MAS1
|
|
|
|
	mfspr	r12,SPRN_MAS3
|
|
|
|
	rlwinm	r9,r8,25,0x1f		/* r9 = log2(page size) */
|
|
|
|
	li	r10,1024
|
|
|
|
	slw	r10,r10,r9		/* r10 = page size */
|
|
|
|
	addi	r10,r10,-1		/* r10 = page offset mask */
|
|
|
|
	and	r11,r3,r10		/* r11 = page offset */
|
|
|
|
	andc	r4,r12,r10		/* r4 = page base */
|
|
|
|
	or	r4,r4,r11		/* r4 = devtree phys addr */
|
|
|
|
#ifdef CONFIG_PHYS_64BIT
|
|
|
|
	mfspr	r3,SPRN_MAS7		/* upper 32 bits of the RPN */
|
|
|
|
#endif
|
|
|
|
	blr
|
2022-11-14 17:57:44 +00:00
|
|
|
SYM_FUNC_END(get_phys_addr)
|
2013-12-24 07:12:04 +00:00
|
|
|
|
2005-09-26 06:04:21 +00:00
|
|
|
/*
|
|
|
|
* Global functions
|
|
|
|
*/
|
|
|
|
|
2022-09-19 17:01:35 +00:00
|
|
|
#ifdef CONFIG_PPC_E500
|
2014-08-20 13:09:03 +00:00
|
|
|
#ifndef CONFIG_PPC_E500MC
|
2009-01-08 14:31:20 +00:00
|
|
|
/* Adjust or setup IVORs for e500v1/v2 */
|
|
|
|
_GLOBAL(__setup_e500_ivors)
/*
 * Program the e500v1/v2-specific interrupt vector offset registers
 * (critical debug, SPE unavailable, SPE FP data/round, perfmon) with the
 * low halves of the corresponding handler labels.  Clobbers r3.
 */
|
|
|
|
	li	r3,DebugCrit@l
|
|
|
|
	mtspr	SPRN_IVOR15,r3
|
|
|
|
	li	r3,SPEUnavailable@l
|
|
|
|
	mtspr	SPRN_IVOR32,r3
|
|
|
|
	li	r3,SPEFloatingPointData@l
|
|
|
|
	mtspr	SPRN_IVOR33,r3
|
|
|
|
	li	r3,SPEFloatingPointRound@l
|
|
|
|
	mtspr	SPRN_IVOR34,r3
|
|
|
|
	li	r3,PerformanceMonitor@l
|
|
|
|
	mtspr	SPRN_IVOR35,r3
|
|
|
|
	sync			/* ensure the IVOR updates are visible */
|
|
|
|
	blr
|
2014-08-20 13:09:03 +00:00
|
|
|
#else
|
2009-01-08 14:31:20 +00:00
|
|
|
/* Adjust or setup IVORs for e500mc */
|
|
|
|
_GLOBAL(__setup_e500mc_ivors)
/*
 * Program the e500mc-specific interrupt vector offset registers
 * (debug, perfmon, doorbell, critical doorbell).  Clobbers r3.
 */
|
|
|
|
	li	r3,DebugDebug@l
|
|
|
|
	mtspr	SPRN_IVOR15,r3
|
|
|
|
	li	r3,PerformanceMonitor@l
|
|
|
|
	mtspr	SPRN_IVOR35,r3
|
|
|
|
	li	r3,Doorbell@l
|
|
|
|
	mtspr	SPRN_IVOR36,r3
|
2009-02-12 13:54:53 +00:00
|
|
|
	li	r3,CriticalDoorbell@l
|
|
|
|
	mtspr	SPRN_IVOR37,r3
|
2012-07-09 12:55:31 +00:00
|
|
|
	sync			/* ensure the IVOR updates are visible */
|
|
|
|
	blr
|
2011-12-20 15:34:47 +00:00
|
|
|
|
2012-07-09 12:55:31 +00:00
|
|
|
/* setup ehv ivors for */
|
|
|
|
_GLOBAL(__setup_ehv_ivors)
/*
 * Program the embedded-hypervisor interrupt vector offset registers
 * (guest doorbell, critical guest doorbell, hypercall, embedded
 * hypervisor privilege).  Clobbers r3.
 */
|
2011-12-20 15:34:47 +00:00
|
|
|
	li	r3,GuestDoorbell@l
|
|
|
|
	mtspr	SPRN_IVOR38,r3
|
|
|
|
	li	r3,CriticalGuestDoorbell@l
|
|
|
|
	mtspr	SPRN_IVOR39,r3
|
|
|
|
	li	r3,Hypercall@l
|
|
|
|
	mtspr	SPRN_IVOR40,r3
|
|
|
|
	li	r3,Ehvpriv@l
|
|
|
|
	mtspr	SPRN_IVOR41,r3
|
2009-01-08 14:31:20 +00:00
|
|
|
	sync			/* ensure the IVOR updates are visible */
|
|
|
|
	blr
|
2014-08-20 13:09:03 +00:00
|
|
|
#endif /* CONFIG_PPC_E500MC */
|
2022-09-19 17:01:35 +00:00
|
|
|
#endif /* CONFIG_PPC_E500 */
|
2009-01-08 14:31:20 +00:00
|
|
|
|
2005-09-26 06:04:21 +00:00
|
|
|
#ifdef CONFIG_SPE
|
|
|
|
/*
|
2015-10-29 00:44:01 +00:00
|
|
|
* extern void __giveup_spe(struct task_struct *prev)
|
2005-09-26 06:04:21 +00:00
|
|
|
*
|
|
|
|
*/
|
2015-10-29 00:44:01 +00:00
|
|
|
_GLOBAL(__giveup_spe)
/*
 * void __giveup_spe(struct task_struct *prev)
 * Save the SPE state (32 EVRs + accumulator) of @prev into its
 * thread_struct, and if @prev has a saved register frame (pt_regs != NULL)
 * clear MSR_SPE in its saved MSR so it faults next time it uses SPE.
 * Note: the cr0 compare below is consumed by the beq only after the
 * state has been saved.
 */
|
2005-09-26 06:04:21 +00:00
|
|
|
	addi	r3,r3,THREAD		/* want THREAD of task */
|
|
|
|
	lwz	r5,PT_REGS(r3)
|
|
|
|
	cmpi	0,r5,0			/* cr0 tested by beq 1f below */
|
2011-06-14 23:34:27 +00:00
|
|
|
	SAVE_32EVRS(0, r4, r3, THREAD_EVR0)
|
2007-09-27 13:43:35 +00:00
|
|
|
	evxor	evr6, evr6, evr6	/* clear out evr6 */
|
2005-09-26 06:04:21 +00:00
|
|
|
	evmwumiaa evr6, evr6, evr6	/* evr6 <- ACC = 0 * 0 + ACC */
|
|
|
|
	li	r4,THREAD_ACC
|
2007-09-27 13:43:35 +00:00
|
|
|
	evstddx	evr6, r4, r3		/* save off accumulator */
|
2005-09-26 06:04:21 +00:00
|
|
|
	beq	1f			/* no pt_regs: nothing to patch */
|
2022-11-27 12:49:32 +00:00
|
|
|
	lwz	r4,_MSR-STACK_INT_FRAME_REGS(r5)
|
2005-09-26 06:04:21 +00:00
|
|
|
	lis	r3,MSR_SPE@h
|
|
|
|
	andc	r4,r4,r3		/* disable SPE for previous task */
|
2022-11-27 12:49:32 +00:00
|
|
|
	stw	r4,_MSR-STACK_INT_FRAME_REGS(r5)
|
2005-09-26 06:04:21 +00:00
|
|
|
1:
|
|
|
|
	blr
|
|
|
|
#endif /* CONFIG_SPE */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* extern void abort(void)
|
|
|
|
*
|
|
|
|
* At present, this routine just applies a system reset.
|
|
|
|
*/
|
|
|
|
_GLOBAL(abort)
/*
 * void abort(void)
 * Force a system reset through the debug facility: clear DBCR0, enable
 * MSR[DE], then set DBCR0[IDM|RST_CHIP] to request a chip reset.
 * Expected not to return (there is no blr after the final isync).
 */
|
|
|
|
	li	r13,0
|
2007-09-27 13:43:35 +00:00
|
|
|
	mtspr	SPRN_DBCR0,r13		/* disable all debug events */
|
2006-02-08 22:41:26 +00:00
|
|
|
	isync
|
2005-09-26 06:04:21 +00:00
|
|
|
	mfmsr	r13
|
|
|
|
	ori	r13,r13,MSR_DE@l	/* Enable Debug Events */
|
|
|
|
	mtmsr	r13
|
2006-02-08 22:41:26 +00:00
|
|
|
	isync
|
2007-09-27 13:43:35 +00:00
|
|
|
	mfspr	r13,SPRN_DBCR0
|
|
|
|
	lis	r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h	/* request chip reset */
|
|
|
|
	mtspr	SPRN_DBCR0,r13
|
2006-02-08 22:41:26 +00:00
|
|
|
	isync
|
2005-09-26 06:04:21 +00:00
|
|
|
|
2008-11-19 15:35:56 +00:00
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
/* When we get here, r24 needs to hold the CPU # */
|
|
|
|
.globl __secondary_start
|
|
|
|
__secondary_start:
|
2013-12-24 07:12:11 +00:00
|
|
|
LOAD_REG_ADDR_PIC(r3, tlbcam_index)
|
|
|
|
lwz r3,0(r3)
|
2008-11-19 15:35:56 +00:00
|
|
|
mtctr r3
|
|
|
|
li r26,0 /* r26 safe? */
|
|
|
|
|
2013-12-24 07:12:11 +00:00
|
|
|
bl switch_to_as1
|
|
|
|
mr r27,r3 /* tlb entry */
|
2008-11-19 15:35:56 +00:00
|
|
|
/* Load each CAM entry */
|
|
|
|
1: mr r3,r26
|
|
|
|
bl loadcam_entry
|
|
|
|
addi r26,r26,1
|
|
|
|
bdnz 1b
|
2013-12-24 07:12:11 +00:00
|
|
|
mr r3,r27 /* tlb entry */
|
|
|
|
LOAD_REG_ADDR_PIC(r4, memstart_addr)
|
|
|
|
lwz r4,0(r4)
|
|
|
|
mr r5,r25 /* phys kernel start */
|
|
|
|
rlwinm r5,r5,0,~0x3ffffff /* aligned 64M */
|
|
|
|
subf r4,r5,r4 /* memstart_addr - phys kernel start */
|
2019-09-20 09:45:40 +00:00
|
|
|
lis r7,KERNELBASE@h
|
|
|
|
ori r7,r7,KERNELBASE@l
|
|
|
|
cmpw r20,r7 /* if kernstart_virt_addr != KERNELBASE, randomized */
|
|
|
|
beq 2f
|
|
|
|
li r4,0
|
|
|
|
2: li r5,0 /* no device tree */
|
2013-12-24 07:12:11 +00:00
|
|
|
li r6,0 /* not boot cpu */
|
|
|
|
bl restore_to_as0
|
|
|
|
|
|
|
|
|
|
|
|
lis r3,__secondary_hold_acknowledge@h
|
|
|
|
ori r3,r3,__secondary_hold_acknowledge@l
|
|
|
|
stw r24,0(r3)
|
|
|
|
|
|
|
|
li r3,0
|
|
|
|
mr r4,r24 /* Why? */
|
|
|
|
bl call_setup_cpu
|
2008-11-19 15:35:56 +00:00
|
|
|
|
2019-01-17 12:25:53 +00:00
|
|
|
/* get current's stack and current */
|
2019-01-31 10:09:02 +00:00
|
|
|
lis r2,secondary_current@ha
|
|
|
|
lwz r2,secondary_current@l(r2)
|
2019-01-31 10:08:58 +00:00
|
|
|
lwz r1,TASK_STACK(r2)
|
2008-11-19 15:35:56 +00:00
|
|
|
|
|
|
|
/* stack */
|
2022-11-27 12:49:40 +00:00
|
|
|
addi r1,r1,THREAD_SIZE-STACK_FRAME_MIN_SIZE
|
2008-11-19 15:35:56 +00:00
|
|
|
li r0,0
|
|
|
|
stw r0,0(r1)
|
|
|
|
|
|
|
|
/* ptr to current thread */
|
|
|
|
addi r4,r2,THREAD /* address of our thread_struct */
|
2009-07-14 20:52:54 +00:00
|
|
|
mtspr SPRN_SPRG_THREAD,r4
|
2008-11-19 15:35:56 +00:00
|
|
|
|
|
|
|
/* Setup the defaults for TLB entries */
|
2009-02-11 00:10:50 +00:00
|
|
|
li r4,(MAS4_TSIZED(BOOK3E_PAGESZ_4K))@l
|
2008-11-19 15:35:56 +00:00
|
|
|
mtspr SPRN_MAS4,r4
|
|
|
|
|
|
|
|
/* Jump to start_secondary */
|
|
|
|
lis r4,MSR_KERNEL@h
|
|
|
|
ori r4,r4,MSR_KERNEL@l
|
|
|
|
lis r3,start_secondary@h
|
|
|
|
ori r3,r3,start_secondary@l
|
|
|
|
mtspr SPRN_SRR0,r3
|
|
|
|
mtspr SPRN_SRR1,r4
|
|
|
|
sync
|
|
|
|
rfi
|
|
|
|
sync
|
|
|
|
|
|
|
|
.globl __secondary_hold_acknowledge
|
|
|
|
__secondary_hold_acknowledge:
|
|
|
|
.long -1
|
|
|
|
#endif
|
|
|
|
|
2019-09-20 09:45:38 +00:00
|
|
|
/*
|
|
|
|
* Create a 64M tlb by address and entry
|
|
|
|
* r3 - entry
|
|
|
|
* r4 - virtual address
|
|
|
|
* r5/r6 - physical address
|
|
|
|
*/
|
|
|
|
_GLOBAL(create_kaslr_tlb_entry)
/*
 * Write a pinned (IPROT) 64M TLB1 entry:
 *   r3 - TLB1 entry index (ESEL)
 *   r4 - virtual address
 *   r5/r6 - physical address (r5 = high 32 bits when CONFIG_PHYS_64BIT)
 * Clobbers r7, r8 (and r3 as a scratch for MAS values).
 */
|
|
|
|
	lis	r7,0x1000		/* Set MAS0(TLBSEL) = 1 */
|
|
|
|
	rlwimi	r7,r3,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r6) */
|
|
|
|
	mtspr	SPRN_MAS0,r7		/* Write MAS0 */
|
|
|
|
|
|
|
|
	lis	r3,(MAS1_VALID|MAS1_IPROT)@h	/* valid + protected from tlbivax */
|
|
|
|
	ori	r3,r3,(MAS1_TSIZE(BOOK3E_PAGESZ_64M))@l
|
|
|
|
	mtspr	SPRN_MAS1,r3		/* Write MAS1 */
|
|
|
|
|
|
|
|
	lis	r3,MAS2_EPN_MASK(BOOK3E_PAGESZ_64M)@h
|
|
|
|
	ori	r3,r3,MAS2_EPN_MASK(BOOK3E_PAGESZ_64M)@l
|
|
|
|
	and	r3,r3,r4		/* EPN = vaddr aligned to 64M */
|
|
|
|
	ori	r3,r3,MAS2_M_IF_NEEDED@l
|
|
|
|
	mtspr	SPRN_MAS2,r3		/* Write MAS2(EPN) */
|
|
|
|
|
|
|
|
#ifdef CONFIG_PHYS_64BIT
|
|
|
|
	ori	r8,r6,(MAS3_SW|MAS3_SR|MAS3_SX)	/* low phys + supervisor RWX */
|
|
|
|
	mtspr	SPRN_MAS3,r8		/* Write MAS3(RPN) */
|
|
|
|
	mtspr	SPRN_MAS7,r5		/* high 32 bits of phys */
|
|
|
|
#else
|
|
|
|
	ori	r8,r5,(MAS3_SW|MAS3_SR|MAS3_SX)	/* phys + supervisor RWX */
|
|
|
|
	mtspr	SPRN_MAS3,r8		/* Write MAS3(RPN) */
|
|
|
|
#endif
|
|
|
|
|
|
|
|
	tlbwe				/* Write TLB */
|
|
|
|
	isync
|
|
|
|
	sync
|
|
|
|
	blr
|
|
|
|
|
2019-09-20 09:45:39 +00:00
|
|
|
/*
|
|
|
|
* Return to the start of the relocated kernel and run again
|
|
|
|
* r3 - virtual address of fdt
|
|
|
|
* r4 - entry of the kernel
|
|
|
|
*/
|
|
|
|
_GLOBAL(reloc_kernel_entry)
/*
 * Jump to the relocated kernel entry point:
 *   r3 - virtual address of fdt (passed through untouched)
 *   r4 - entry address of the kernel
 * Clears MSR[IS|DS] in SRR1 so the target runs in address space 0,
 * then rfi's to r4.  Does not return.
 */
|
|
|
|
	mfmsr	r7
|
|
|
|
	rlwinm	r7, r7, 0, ~(MSR_IS | MSR_DS)	/* back to AS0 on rfi */
|
|
|
|
|
|
|
|
	mtspr	SPRN_SRR0,r4
|
|
|
|
	mtspr	SPRN_SRR1,r7
|
|
|
|
	rfi
|
|
|
|
|
2013-12-24 07:12:07 +00:00
|
|
|
/*
|
|
|
|
* Create a tlb entry with the same effective and physical address as
|
|
|
|
* the tlb entry used by the current running code. But set the TS to 1.
|
|
|
|
* Then switch to the address space 1. It will return with the r3 set to
|
|
|
|
* the ESEL of the new created tlb.
|
|
|
|
*/
|
|
|
|
_GLOBAL(switch_to_as1)
/*
 * Duplicate the TLB1 entry covering the currently executing code into a
 * free TLB1 slot with TS=1, then rfi back to the caller (saved lr in r5)
 * with MSR[IS|DS] set, i.e. running in address space 1.
 * Returns r3 = ESEL of the newly created entry (the caller passes it to
 * restore_to_as0 for invalidation).  Clobbers r4, r5.
 */
|
|
|
|
	mflr	r5			/* return address, used as rfi target */
|
|
|
|
|
|
|
|
	/* Find an entry that is not used */
|
|
|
|
	mfspr	r3,SPRN_TLB1CFG
|
|
|
|
	andi.	r3,r3,0xfff		/* r3 = number of TLB1 entries */
|
|
|
|
	mfspr	r4,SPRN_PID
|
|
|
|
	rlwinm	r4,r4,16,0x3fff0000	/* turn PID into MAS6[SPID] */
|
|
|
|
	mtspr	SPRN_MAS6,r4
|
|
|
|
1:	lis	r4,0x1000		/* Set MAS0(TLBSEL) = 1 */
|
|
|
|
	addi	r3,r3,-1		/* scan entries from the top down */
|
|
|
|
	rlwimi	r4,r3,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r3) */
|
|
|
|
	mtspr	SPRN_MAS0,r4
|
|
|
|
	tlbre
|
|
|
|
	mfspr	r4,SPRN_MAS1
|
|
|
|
	andis.	r4,r4,MAS1_VALID@h
|
|
|
|
	bne	1b			/* keep looking until an invalid slot */
|
|
|
|
|
|
|
|
	/* Get the tlb entry used by the current running code */
|
2021-08-24 07:56:26 +00:00
|
|
|
	bcl	20,31,$+4		/* get current PC into lr */
|
2013-12-24 07:12:07 +00:00
|
|
|
0:	mflr	r4
|
|
|
|
	tlbsx	0,r4			/* look up the entry mapping this code */
|
|
|
|
|
|
|
|
	mfspr	r4,SPRN_MAS1
|
|
|
|
	ori	r4,r4,MAS1_TS		/* Set the TS = 1 */
|
|
|
|
	mtspr	SPRN_MAS1,r4
|
|
|
|
|
|
|
|
	mfspr	r4,SPRN_MAS0
|
|
|
|
	rlwinm	r4,r4,0,~MAS0_ESEL_MASK
|
|
|
|
	rlwimi	r4,r3,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r3) */
|
|
|
|
	mtspr	SPRN_MAS0,r4
|
|
|
|
	tlbwe				/* write the AS1 copy into the free slot */
|
|
|
|
	isync
|
|
|
|
	sync
|
|
|
|
|
|
|
|
	mfmsr	r4
|
|
|
|
	ori	r4,r4,MSR_IS | MSR_DS	/* return running in AS1 */
|
|
|
|
	mtspr	SPRN_SRR0,r5
|
|
|
|
	mtspr	SPRN_SRR1,r4
|
|
|
|
	sync
|
|
|
|
	rfi
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Restore to the address space 0 and also invalidate the tlb entry created
|
|
|
|
* by switch_to_as1.
|
2013-12-24 07:12:10 +00:00
|
|
|
* r3 - the tlb entry which should be invalidated
|
|
|
|
* r4 - __pa(PAGE_OFFSET in AS1) - __pa(PAGE_OFFSET in AS0)
|
|
|
|
* r5 - device tree virtual address. If r4 is 0, r5 is ignored.
|
2013-12-24 07:12:11 +00:00
|
|
|
* r6 - boot cpu
|
2013-12-24 07:12:07 +00:00
|
|
|
*/
|
|
|
|
_GLOBAL(restore_to_as0)
/*
 * Switch back to address space 0 and invalidate the temporary TLB1 entry
 * created by switch_to_as1:
 *   r3 - the tlb entry which should be invalidated
 *   r4 - __pa(PAGE_OFFSET in AS1) - __pa(PAGE_OFFSET in AS0)
 *   r5 - device tree virtual address (ignored if r4 == 0)
 *   r6 - boot cpu flag
 * Returns to the (r4-adjusted) caller, except when r4 != 0 on the boot
 * cpu, in which case it tail-calls _start to relocate again.
 */
|
|
|
|
	mflr	r0			/* caller's return address */
|
|
|
|
|
2021-08-24 07:56:26 +00:00
|
|
|
	bcl	20,31,$+4		/* get current PC into lr */
|
2013-12-24 07:12:07 +00:00
|
|
|
0:	mflr	r9
|
|
|
|
	addi	r9,r9,1f - 0b		/* r9 = AS1 address of label 1 below */
|
|
|
|
|
|
2013-12-24 07:12:10 +00:00
|
|
|
	/*
|
|
|
|
	 * We may map the PAGE_OFFSET in AS0 to a different physical address,
|
|
|
|
	 * so we need to calculate the right jump and device tree address based
|
|
|
|
	 * on the offset passed by r4.
|
|
|
|
	 */
|
|
|
|
	add	r9,r9,r4		/* rfi target in the AS0 mapping */
|
|
|
|
	add	r5,r5,r4		/* adjust device tree address too */
|
2013-12-24 07:12:11 +00:00
|
|
|
	add	r0,r0,r4		/* and the caller's return address */
|
2013-12-24 07:12:10 +00:00
|
|
|
|
|
2:	mfmsr	r7
|
2013-12-24 07:12:07 +00:00
|
|
|
	li	r8,(MSR_IS | MSR_DS)
|
|
|
|
	andc	r7,r7,r8		/* clear AS1 bits for the rfi */
|
|
|
|
|
|
|
|
	mtspr	SPRN_SRR0,r9
|
|
|
|
	mtspr	SPRN_SRR1,r7
|
|
|
|
	sync
|
|
|
|
	rfi				/* continue at 1: in AS0 */
|
|
|
|
|
|
|
|
	/* Invalidate the temporary tlb entry for AS1 */
|
|
|
|
1:	lis	r9,0x1000		/* Set MAS0(TLBSEL) = 1 */
|
|
|
|
	rlwimi	r9,r3,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r3) */
|
|
|
|
	mtspr	SPRN_MAS0,r9
|
|
|
|
	tlbre
|
|
|
|
	mfspr	r9,SPRN_MAS1
|
|
|
|
	rlwinm	r9,r9,0,2,31		/* Clear MAS1 Valid and IPPROT */
|
|
|
|
	mtspr	SPRN_MAS1,r9
|
|
|
|
	tlbwe
|
|
|
|
	isync
|
2013-12-24 07:12:10 +00:00
|
|
|
|
|
|
|
	cmpwi	r4,0			/* cr0: offset == 0 ? */
|
2013-12-24 07:12:11 +00:00
|
|
|
	cmpwi	cr1,r6,0		/* cr1: not boot cpu ? */
|
|
|
|
	cror	eq,4*cr1+eq,eq
|
|
|
|
	bne	3f			/* offset != 0 && is_boot_cpu */
|
2013-12-24 07:12:07 +00:00
|
|
|
	mtlr	r0
|
|
|
|
	blr
|
|
|
|
|
|
2013-12-24 07:12:10 +00:00
|
|
|
	/*
|
|
|
|
	 * The PAGE_OFFSET will map to a different physical address,
|
|
|
|
	 * jump to _start to do another relocation again.
|
|
|
|
	 */
|
|
|
|
3:	mr	r3,r5			/* _start expects fdt address in r3 */
|
|
|
|
	bl	_start
|