mirror of
https://github.com/torvalds/linux.git
synced 2024-12-06 11:01:43 +00:00
856b0f591e
Print details of the new kexec image loaded.
Based on the original code from
commit 221f2c770e
("arm64/kexec: Add pr_debug output")
Signed-off-by: Marcin Nowakowski <marcin.nowakowski@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/14614/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
138 lines
3.5 KiB
C
/*
|
|
* machine_kexec.c for kexec
|
|
* Created by <nschichan@corp.free.fr> on Thu Oct 12 15:15:06 2006
|
|
*
|
|
* This source code is licensed under the GNU General Public License,
|
|
* Version 2. See the file COPYING for more details.
|
|
*/
|
|
#include <linux/compiler.h>
|
|
#include <linux/kexec.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/delay.h>
|
|
|
|
#include <asm/cacheflush.h>
|
|
#include <asm/page.h>
|
|
|
|
/* Relocation trampoline, assembled elsewhere and copied below. */
extern const unsigned char relocate_new_kernel[];
extern const size_t relocate_new_kernel_size;

/* Parameters consumed by the relocation code (set in machine_kexec()). */
extern unsigned long kexec_start_address;
extern unsigned long kexec_indirection_page;

/* Optional platform hooks; board code may install these. NULL = no hook. */
int (*_machine_kexec_prepare)(struct kimage *) = NULL;
void (*_machine_kexec_shutdown)(void) = NULL;
void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
#ifdef CONFIG_SMP
/* Address the secondary CPUs spin on once kexec_ready_to_reboot is set. */
void (*relocated_kexec_smp_wait) (void *);
atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
void (*_crash_smp_send_stop)(void) = NULL;
#endif
|
|
|
|
/*
 * kexec_image_info() - dump the layout of a loaded kexec image.
 *
 * Emits (via pr_debug, so only with dynamic debug / DEBUG enabled) the
 * image type, entry point, head word and every segment's physical
 * range, byte size and page count.  Purely informational; no state is
 * modified.
 */
static void kexec_image_info(const struct kimage *kimage)
{
	unsigned long i;

	pr_debug("kexec kimage info:\n");
	pr_debug("  type: %d\n", kimage->type);
	pr_debug("  start: %lx\n", kimage->start);
	pr_debug("  head: %lx\n", kimage->head);
	pr_debug("  nr_segments: %lu\n", kimage->nr_segments);

	for (i = 0; i < kimage->nr_segments; i++) {
		pr_debug("    segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
			i,
			kimage->segment[i].mem,
			kimage->segment[i].mem + kimage->segment[i].memsz,
			(unsigned long)kimage->segment[i].memsz,
			(unsigned long)kimage->segment[i].memsz / PAGE_SIZE);
	}
}
|
|
|
|
int
|
|
machine_kexec_prepare(struct kimage *kimage)
|
|
{
|
|
kexec_image_info(kimage);
|
|
|
|
if (_machine_kexec_prepare)
|
|
return _machine_kexec_prepare(kimage);
|
|
return 0;
|
|
}
|
|
|
|
/*
 * machine_kexec_cleanup() - arch hook run when a kexec image is freed.
 *
 * Intentionally empty: no arch-specific state was allocated in
 * machine_kexec_prepare() that would need tearing down here.
 */
void
machine_kexec_cleanup(struct kimage *kimage)
{
}
|
|
|
|
void
|
|
machine_shutdown(void)
|
|
{
|
|
if (_machine_kexec_shutdown)
|
|
_machine_kexec_shutdown();
|
|
}
|
|
|
|
void
|
|
machine_crash_shutdown(struct pt_regs *regs)
|
|
{
|
|
if (_machine_crash_shutdown)
|
|
_machine_crash_shutdown(regs);
|
|
else
|
|
default_machine_crash_shutdown(regs);
|
|
}
|
|
|
|
/* Function that never returns; used for the jump into the trampoline. */
typedef void (*noretfun_t)(void) __noreturn;
|
|
|
/*
 * machine_kexec() - transfer control to the previously loaded image.
 *
 * Copies the relocate_new_kernel trampoline into the image's control
 * page, converts the generic kexec page list from physical to
 * directly-mapped virtual addresses, disables interrupts, flushes the
 * caches and jumps to the trampoline.  Never returns.
 */
void
machine_kexec(struct kimage *image)
{
	unsigned long reboot_code_buffer;
	unsigned long entry;
	unsigned long *ptr;

	/* Control page that will hold the relocation trampoline. */
	reboot_code_buffer =
		(unsigned long)page_address(image->control_code_page);

	/* Entry point of the new kernel, as a directly-mapped address. */
	kexec_start_address =
		(unsigned long) phys_to_virt(image->start);

	/*
	 * For a default image, head points (physically) at the first
	 * indirection page; for other types (presumably crash kernels —
	 * the segments are already in place) image->head itself serves
	 * as the one-entry list.
	 */
	if (image->type == KEXEC_TYPE_DEFAULT) {
		kexec_indirection_page =
			(unsigned long) phys_to_virt(image->head & PAGE_MASK);
	} else {
		kexec_indirection_page = (unsigned long)&image->head;
	}

	memcpy((void*)reboot_code_buffer, relocate_new_kernel,
		relocate_new_kernel_size);

	/*
	 * The generic kexec code builds a page list with physical
	 * addresses.  They are directly accessible through KSEG0 (or
	 * CKSEG0 or XPHYS if on a 64-bit system), hence the
	 * phys_to_virt() call.  The walk follows IND_INDIRECTION links
	 * and stops at the IND_DONE entry, rewriting each tagged entry
	 * in place.
	 */
	for (ptr = &image->head; (entry = *ptr) && !(entry &IND_DONE);
	     ptr = (entry & IND_INDIRECTION) ?
	       phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
		if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
		    *ptr & IND_DESTINATION)
			*ptr = (unsigned long) phys_to_virt(*ptr);
	}

	/*
	 * We do not want to be bothered (interrupted) from here on.
	 */
	local_irq_disable();

	printk("Will call new kernel at %08lx\n", image->start);
	printk("Bye ...\n");
	/* Push the trampoline and rewritten page list out to memory. */
	__flush_cache_all();
#ifdef CONFIG_SMP
	/* All secondary cpus now may jump to kexec_wait cycle */
	relocated_kexec_smp_wait = reboot_code_buffer +
		(void *)(kexec_smp_wait - relocate_new_kernel);
	/* Publish the wait address before releasing the other CPUs. */
	smp_wmb();
	atomic_set(&kexec_ready_to_reboot, 1);
#endif
	/* Jump to the copied trampoline; it takes over from here. */
	((noretfun_t) reboot_code_buffer)();
}
|