54622f10a6
This adds relocatable kernel support for kdump. With this, one can use the same regular kernel to capture a kdump.

A signature (0xfeed1234) is passed in r6 from the panic code to the next kernel through the kexec_sequence and purgatory code. The signature is used to differentiate between kdump and non-kdump kernels. The purgatory code compares the signature and sets __kdump_flag in head_64.S. During boot, the kernel checks __kdump_flag and, if it is set, behaves as a relocatable kdump kernel. This kernel boots at the address where it was loaded by kexec-tools, i.e. at the address reserved through the crashkernel boot parameter.

CONFIG_CRASH_DUMP depends on CONFIG_RELOCATABLE to build the kdump kernel as relocatable, so the same kernel image can serve as both the production and the kdump kernel.

This patch incorporates changes suggested by Paul Mackerras to avoid use of the GOT and to avoid two copies of the code.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Mohan Kumar M <mohan@in.ibm.com>
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
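
For orientation, the handoff described above amounts to the following check at early boot. This is a minimal C-flavoured sketch under assumed names: the real check is assembly in head_64.S and the purgatory, and only the 0xfeed1234 signature and __kdump_flag come from the patch itself.

#include <stdint.h>

#define KDUMP_SIGNATURE	0xfeed1234	/* value purgatory passes in r6 */

static uint64_t __kdump_flag;		/* examined later during boot */

/* Hypothetical stand-in for the early head_64.S code: r6 carries the
 * signature handed over by the previous kernel's kexec_sequence and
 * purgatory. If it matches, this boot proceeds as a kdump capture
 * kernel running at the crashkernel-reserved address. */
static void early_kdump_check(uint64_t r6)
{
	if (r6 == KDUMP_SIGNATURE)
		__kdump_flag = 1;
}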
/*
 * Routines for doing kexec-based kdump.
 *
 * Copyright (C) 2005, IBM Corp.
 *
 * Created by: Michael Ellerman
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#undef DEBUG

#include <linux/crash_dump.h>
#include <linux/bootmem.h>
#include <linux/lmb.h>
#include <asm/code-patching.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/uaccess.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

/* Stores the physical address of the ELF header of the crash image. */
unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;

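/*
 * For reference (defined in <linux/crash_dump.h>, not in this file):
 * is_kdump_kernel() tests "elfcorehdr_addr != ELFCORE_ADDR_MAX", so a
 * valid header address doubles as the "booted after a panic" indicator
 * discussed in the note above parse_elfcorehdr() below.
 */
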
#ifndef CONFIG_RELOCATABLE
void __init reserve_kdump_trampoline(void)
{
	/* Keep low memory free so the exception-vector trampolines
	 * written by setup_kdump_trampoline() are not clobbered. */
	lmb_reserve(0, KDUMP_RESERVE_LIMIT);
}

static void __init create_trampoline(unsigned long addr)
{
	unsigned int *p = (unsigned int *)addr;

	/* The maximum range of a single instruction branch is the current
	 * instruction's address + (32 MB - 4) bytes. For the trampoline we
	 * need to branch to current address + 32 MB. So we insert a nop at
	 * the trampoline address, then the next instruction (+ 4 bytes)
	 * does a branch of (32 MB - 4). The net effect is that when we
	 * branch to "addr" we jump to ("addr" + 32 MB). Although it requires
	 * two instructions it doesn't require any registers.
	 */
	patch_instruction(p, PPC_NOP_INSTR);
	patch_branch(++p, addr + PHYSICAL_START, 0);
}

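/*
 * Illustration of the resulting trampoline (assumed example with
 * PHYSICAL_START = 32 MB, as used for classic non-relocatable kdump
 * kernels; not emitted verbatim by this file):
 *
 *	addr + 0:	nop
 *	addr + 4:	b	(addr + 32 MB)
 *
 * An exception that vectors into reserved low memory at "addr" thus
 * lands in the capture kernel's copy of that vector at addr + 32 MB.
 */
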
void __init setup_kdump_trampoline(void)
{
	unsigned long i;

	DBG(" -> setup_kdump_trampoline()\n");

	for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8) {
		create_trampoline(i);
	}

#ifdef CONFIG_PPC_PSERIES
	create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START);
	create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);
#endif /* CONFIG_PPC_PSERIES */

	DBG(" <- setup_kdump_trampoline()\n");
}
#endif /* CONFIG_RELOCATABLE */

/*
 * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
 * is_kdump_kernel() to determine if we are booting after a panic. Hence
 * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
 */
static int __init parse_elfcorehdr(char *p)
{
	if (p)
		elfcorehdr_addr = memparse(p, &p);

	return 1;
}
__setup("elfcorehdr=", parse_elfcorehdr);

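/*
 * Usage illustration (example value assumed, not taken from this
 * patch): kexec-tools passes the header location on the capture
 * kernel's command line, e.g. "elfcorehdr=128M"; memparse() accepts
 * plain numbers as well as K/M/G suffixes.
 */
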
static int __init parse_savemaxmem(char *p)
{
	if (p)
		saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1;

	return 1;
}
__setup("savemaxmem=", parse_savemaxmem);

static ssize_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize,
				unsigned long offset, int userbuf)
{
	if (userbuf) {
		if (copy_to_user((char __user *)buf, (vaddr + offset), csize))
			return -EFAULT;
	} else
		memcpy(buf, (vaddr + offset), csize);

	return csize;
}

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;

	if (!csize)
		return 0;

	csize = min(csize, PAGE_SIZE);

	if (pfn < max_pfn) {
		/* Old memory is still covered by the linear mapping. */
		vaddr = __va(pfn << PAGE_SHIFT);
		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
	} else {
		/* Page is outside this kernel's memory map; create a
		 * temporary mapping for the duration of the copy. */
		vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);
		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
		iounmap(vaddr);
	}

	return csize;
}

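/*
 * Usage sketch (assumed, mirroring the generic /proc/vmcore read path
 * rather than code in this file): old memory is consumed one page at a
 * time through this hook, roughly as
 *
 *	while (count) {
 *		pfn = (unsigned long)(*ppos / PAGE_SIZE);
 *		offset = (unsigned long)(*ppos % PAGE_SIZE);
 *		nr_bytes = min(count, PAGE_SIZE - offset);
 *		tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf);
 *		if (tmp < 0)
 *			return tmp;
 *		*ppos += tmp; buf += tmp; count -= tmp;
 *	}
 */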