2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* linux/boot/head.S
|
|
|
|
*
|
|
|
|
* Copyright (C) 1991, 1992, 1993 Linus Torvalds
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* head.S contains the 32-bit startup code.
|
|
|
|
*
|
|
|
|
* NOTE!!! Startup happens at absolute address 0x00001000, which is also where
|
|
|
|
* the page directory will exist. The startup code will be overwritten by
|
|
|
|
* the page directory. [According to comments etc elsewhere on a compressed
|
|
|
|
* kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC]
|
|
|
|
*
|
|
|
|
* Page 0 is deliberately kept safe, since System Management Mode code in
|
|
|
|
* laptops may need to access the BIOS data stored there. This is also
|
|
|
|
 * useful for future device drivers that access the BIOS via VM86
 * mode.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
2005-06-25 21:58:59 +00:00
|
|
|
* High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2009-05-08 22:59:13 +00:00
|
|
|
.code32
|
|
|
|
.text
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2009-09-16 20:44:27 +00:00
|
|
|
#include <linux/init.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <linux/linkage.h>
|
|
|
|
#include <asm/segment.h>
|
2008-04-08 10:54:30 +00:00
|
|
|
#include <asm/boot.h>
|
2007-05-02 17:27:07 +00:00
|
|
|
#include <asm/msr.h>
|
2008-05-12 13:43:39 +00:00
|
|
|
#include <asm/processor-flags.h>
|
2007-10-26 17:29:04 +00:00
|
|
|
#include <asm/asm-offsets.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2009-09-16 20:44:27 +00:00
|
|
|
__HEAD
|
2005-04-16 22:20:36 +00:00
|
|
|
.code32
|
2009-02-13 21:50:22 +00:00
|
|
|
ENTRY(startup_32)
	/*
	 * 32bit entry is 0 and it is ABI so immutable!
	 * If we come here directly from a bootloader,
	 * kernel(text+data+bss+brk) ramdisk, zero_page, command line
	 * all need to be under the 4G limit.
	 */
	cld
	/*
	 * Test KEEP_SEGMENTS flag (bit 6 of loadflags) to see if the
	 * bootloader is asking us to not reload segments.
	 */
	testb	$(1<<6), BP_loadflags(%esi)
	jnz	1f

	cli
	movl	$(__BOOT_DS), %eax
	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %ss
1:

	/*
	 * Calculate the delta between where we were compiled to run
	 * at and where we were actually loaded at.  This can only be done
	 * with a short local call on x86.  Nothing else will tell us what
	 * address we are running at.  The reserved chunk of the real-mode
	 * data at 0x1e4 (defined as a scratch field) are used as the stack
	 * for this calculation.  Only 4 bytes are needed.
	 */
	leal	(BP_scratch+4)(%esi), %esp
	call	1f
1:	popl	%ebp
	subl	$1b, %ebp		/* %ebp = load address - link address */

	/* Setup a stack and make sure cpu supports long mode. */
	movl	$boot_stack_end, %eax
	addl	%ebp, %eax		/* relocate the stack pointer by the load delta */
	movl	%eax, %esp

	call	verify_cpu
	testl	%eax, %eax		/* non-zero return = no long mode support */
	jnz	no_longmode

	/*
	 * Compute the delta between where we were compiled to run at
	 * and where the code will actually run at.
	 *
	 * %ebp contains the address we are loaded at by the boot loader and %ebx
	 * contains the address where we should move the kernel image temporarily
	 * for safe in-place decompression.
	 */

#ifdef CONFIG_RELOCATABLE
	movl	%ebp, %ebx
	/* Round %ebx up to the bootloader-supplied kernel alignment. */
	movl	BP_kernel_alignment(%esi), %eax
	decl	%eax
	addl	%eax, %ebx
	notl	%eax
	andl	%eax, %ebx
	cmpl	$LOAD_PHYSICAL_ADDR, %ebx
	jge	1f			/* loaded high enough: keep the aligned load address */
#endif
	movl	$LOAD_PHYSICAL_ADDR, %ebx
1:

	/* Target address to relocate to for decompression */
	addl	$z_extract_offset, %ebx

	/*
	 * Prepare for entering 64 bit mode
	 */

	/* Load new GDT with the 64bit segments using 32bit descriptor */
	leal	gdt(%ebp), %eax
	movl	%eax, gdt+2(%ebp)	/* patch the base field (offset 2) of the GDT pseudo-descriptor */
	lgdt	gdt(%ebp)

	/* Enable PAE mode (required before setting EFER.LME) */
	movl	%cr4, %eax
	orl	$X86_CR4_PAE, %eax
	movl	%eax, %cr4

	/*
	 * Build early 4G boot pagetable
	 */
	/* Initialize Page tables to 0 (six 4K pages) */
	leal	pgtable(%ebx), %edi
	xorl	%eax, %eax
	movl	$((4096*6)/4), %ecx
	rep	stosl

	/* Build Level 4: single entry pointing at the level-3 table */
	leal	pgtable + 0(%ebx), %edi
	leal	0x1007 (%edi), %eax	/* 0x1000 = next table; low 0x7 = present+writable+user */
	movl	%eax, 0(%edi)

	/* Build Level 3: four entries -> four level-2 tables */
	leal	pgtable + 0x1000(%ebx), %edi
	leal	0x1007(%edi), %eax
	movl	$4, %ecx
1:	movl	%eax, 0x00(%edi)
	addl	$0x00001000, %eax	/* next level-2 table */
	addl	$8, %edi		/* next 8-byte entry */
	decl	%ecx
	jnz	1b

	/* Build Level 2: 2048 x 2MB pages = identity map of the low 4G */
	leal	pgtable + 0x2000(%ebx), %edi
	movl	$0x00000183, %eax	/* 2M page: present+writable+PSE+global */
	movl	$2048, %ecx
1:	movl	%eax, 0(%edi)
	addl	$0x00200000, %eax	/* advance by 2MB */
	addl	$8, %edi
	decl	%ecx
	jnz	1b

	/* Enable the boot page tables */
	leal	pgtable(%ebx), %eax
	movl	%eax, %cr3

	/* Enable Long mode in EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_LME, %eax
	wrmsr

	/* After gdt is loaded: clear LDT, load TSS */
	xorl	%eax, %eax
	lldt	%ax
	movl	$0x20, %eax		/* presumably the TSS selector in the boot GDT — confirm against gdt definition */
	ltr	%ax

	/*
	 * Setup for the jump to 64bit mode
	 *
	 * When the jump is performed we will be in long mode but
	 * in 32bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1
	 * (and in turn EFER.LMA = 1). To jump into 64bit mode we use
	 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
	 * We place all of the values on our mini stack so lret can
	 * be used to perform that far jump.
	 */
	pushl	$__KERNEL_CS
	leal	startup_64(%ebp), %eax
#ifdef CONFIG_EFI_MIXED
	/* If entered via the 32-bit EFI stub, continue at handover_entry instead. */
	movl	efi32_config(%ebp), %ebx
	cmp	$0, %ebx
	jz	1f
	leal	handover_entry(%ebp), %eax
1:
#endif
	pushl	%eax

	/* Enter paged protected Mode, activating Long Mode */
	movl	$(X86_CR0_PG | X86_CR0_PE), %eax /* Enable Paging and Protected mode */
	movl	%eax, %cr0

	/* Jump from 32bit compatibility mode into 64bit mode. */
	lret
ENDPROC(startup_32)
|
2007-05-02 17:27:07 +00:00
|
|
|
|
x86/efi: Firmware agnostic handover entry points
The EFI handover code only works if the "bitness" of the firmware and
the kernel match, i.e. 64-bit firmware and 64-bit kernel - it is not
possible to mix the two. This goes against the tradition that a 32-bit
kernel can be loaded on a 64-bit BIOS platform without having to do
anything special in the boot loader. Linux distributions, for one thing,
regularly run only 32-bit kernels on their live media.
Despite having only one 'handover_offset' field in the kernel header,
EFI boot loaders use two separate entry points to enter the kernel based
on the architecture the boot loader was compiled for,
(1) 32-bit loader: handover_offset
(2) 64-bit loader: handover_offset + 512
Since we already have two entry points, we can leverage them to infer
the bitness of the firmware we're running on, without requiring any boot
loader modifications, by making (1) and (2) valid entry points for both
CONFIG_X86_32 and CONFIG_X86_64 kernels.
To be clear, a 32-bit boot loader will always use (1) and a 64-bit boot
loader will always use (2). It's just that, if a single kernel image
supports (1) and (2) that image can be used with both 32-bit and 64-bit
boot loaders, and hence both 32-bit and 64-bit EFI.
(1) and (2) must be 512 bytes apart at all times, but that is already
part of the boot ABI and we could never change that delta without
breaking existing boot loaders anyhow.
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
2014-01-10 15:54:31 +00:00
|
|
|
#ifdef CONFIG_EFI_MIXED
	/* Fixed entry offset: 32-bit EFI handover point is part of the boot ABI. */
	.org 0x190
ENTRY(efi32_stub_entry)
	/* Fetch the EFI handover arguments that the loader pushed on the stack. */
	add	$0x4, %esp		/* Discard return address */
	popl	%ecx			/* EFI image handle */
	popl	%edx			/* EFI system table pointer */
	popl	%esi			/* boot_params */

	/* Use the boot_params scratch field as a 4-byte stack for the call below. */
	leal	(BP_scratch+4)(%esi), %esp
	call	1f
1:	pop	%ebp
	subl	$1b, %ebp		/* %ebp = load delta (actual - linked address) */

	/* Stash the firmware arguments for later use from 64-bit code. */
	movl	%ecx, efi32_config(%ebp)
	movl	%edx, efi32_config+8(%ebp)
	sgdtl	efi32_boot_gdt(%ebp)	/* remember the firmware's GDT */

	leal	efi32_config(%ebp), %eax
	movl	%eax, efi_config(%ebp)

	/* Continue through the common 32-bit startup path. */
	jmp	startup_32
ENDPROC(efi32_stub_entry)
#endif
|
|
|
|
|
2007-05-02 17:27:07 +00:00
|
|
|
	.code64
	/* Fixed entry offset: 64-bit entry point is part of the boot ABI. */
	.org 0x200
ENTRY(startup_64)
	/*
	 * 64bit entry is 0x200 and it is ABI so immutable!
	 * We come here either from startup_32 or directly from a
	 * 64bit bootloader.
	 * If we come here from a bootloader, kernel(text+data+bss+brk),
	 * ramdisk, zero_page, command line could be above 4G.
	 * We depend on an identity mapped page table being provided
	 * that maps our entire kernel(text+data+bss+brk), zero page
	 * and command line.
	 */
#ifdef CONFIG_EFI_STUB
	/*
	 * The entry point for the PE/COFF executable is efi_pe_entry, so
	 * only legacy boot loaders will execute this jmp.
	 */
	jmp	preferred_addr

ENTRY(efi_pe_entry)
	/* Microsoft x64 calling convention: firmware passes args in %rcx/%rdx. */
	movq	%rcx, efi64_config(%rip)	/* Handle */
	movq	%rdx, efi64_config+8(%rip) /* EFI System table pointer */

	leaq	efi64_config(%rip), %rax
	movq	%rax, efi_config(%rip)

	/* Compute the load delta with a local call. */
	call	1f
1:	popq	%rbp
	subq	$1b, %rbp

	/*
	 * Relocate efi_config->call().
	 */
	addq	%rbp, efi64_config+88(%rip)

	movq	%rax, %rdi
	call	make_boot_params
	cmpq	$0,%rax			/* NULL => firmware setup failed */
	je	fail
	mov	%rax, %rsi		/* %rsi = boot_params */
	leaq	startup_32(%rip), %rax
	movl	%eax, BP_code32_start(%rsi)
	jmp	2f /* Skip the relocation */

handover_entry:
	/* Common path for the EFI handover entry points; recompute load delta. */
	call	1f
1:	popq	%rbp
	subq	$1b, %rbp

	/*
	 * Relocate efi_config->call().
	 */
	movq	efi_config(%rip), %rax
	addq	%rbp, 88(%rax)
2:
	movq	efi_config(%rip), %rdi
	call	efi_main
	movq	%rax,%rsi		/* efi_main returns the boot_params pointer */
	cmpq	$0,%rax
	jne	2f
fail:
	/* EFI init failed, so hang. */
	hlt
	jmp	fail
2:
	/* Jump to the kernel's preferred load address recorded by efi_main. */
	movl	BP_code32_start(%esi), %eax
	leaq	preferred_addr(%rax), %rax
	jmp	*%rax

preferred_addr:
#endif

	/* Setup data segments. */
	xorl	%eax, %eax
	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %ss
	movl	%eax, %fs
	movl	%eax, %gs

	/*
	 * Compute the decompressed kernel start address.  It is where
	 * we were loaded at aligned to a 2M boundary. %rbp contains the
	 * decompressed kernel start address.
	 *
	 * If it is a relocatable kernel then decompress and run the kernel
	 * from load address aligned to 2MB addr, otherwise decompress and
	 * run the kernel from LOAD_PHYSICAL_ADDR
	 *
	 * We cannot rely on the calculation done in 32-bit mode, since we
	 * may have been invoked via the 64-bit entry point.
	 */

	/* Start with the delta to where the kernel will run at. */
#ifdef CONFIG_RELOCATABLE
	leaq	startup_32(%rip) /* - $startup_32 */, %rbp
	/* Round %rbp up to the bootloader-supplied kernel alignment. */
	movl	BP_kernel_alignment(%rsi), %eax
	decl	%eax
	addq	%rax, %rbp
	notq	%rax
	andq	%rax, %rbp
	cmpq	$LOAD_PHYSICAL_ADDR, %rbp
	jge	1f			/* loaded high enough: keep the aligned load address */
#endif
	movq	$LOAD_PHYSICAL_ADDR, %rbp
1:

	/* Target address to relocate to for decompression */
	leaq	z_extract_offset(%rbp), %rbx

	/* Set up the stack */
	leaq	boot_stack_end(%rbx), %rsp

	/* Zero EFLAGS */
	pushq	$0
	popfq

	/*
	 * Copy the compressed kernel to the end of our buffer
	 * where decompression in place becomes safe.
	 * Copies backwards (std) since source and destination may overlap.
	 */
	pushq	%rsi			/* preserve the real-mode data pointer */
	leaq	(_bss-8)(%rip), %rsi
	leaq	(_bss-8)(%rbx), %rdi
	movq	$_bss /* - $startup_32 */, %rcx
	shrq	$3, %rcx		/* byte count -> qword count */
	std
	rep	movsq
	cld
	popq	%rsi

	/*
	 * Jump to the relocated address.
	 */
	leaq	relocated(%rbx), %rax
	jmp	*%rax
|
|
|
|
|
x86/efi: Firmware agnostic handover entry points
The EFI handover code only works if the "bitness" of the firmware and
the kernel match, i.e. 64-bit firmware and 64-bit kernel - it is not
possible to mix the two. This goes against the tradition that a 32-bit
kernel can be loaded on a 64-bit BIOS platform without having to do
anything special in the boot loader. Linux distributions, for one thing,
regularly run only 32-bit kernels on their live media.
Despite having only one 'handover_offset' field in the kernel header,
EFI boot loaders use two separate entry points to enter the kernel based
on the architecture the boot loader was compiled for,
(1) 32-bit loader: handover_offset
(2) 64-bit loader: handover_offset + 512
Since we already have two entry points, we can leverage them to infer
the bitness of the firmware we're running on, without requiring any boot
loader modifications, by making (1) and (2) valid entry points for both
CONFIG_X86_32 and CONFIG_X86_64 kernels.
To be clear, a 32-bit boot loader will always use (1) and a 64-bit boot
loader will always use (2). It's just that, if a single kernel image
supports (1) and (2) that image can be used with both 32-bit and 64-bit
boot loaders, and hence both 32-bit and 64-bit EFI.
(1) and (2) must be 512 bytes apart at all times, but that is already
part of the boot ABI and we could never change that delta without
breaking existing boot loaders anyhow.
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
2014-01-10 15:54:31 +00:00
|
|
|
#ifdef CONFIG_EFI_STUB
|
|
|
|
.org 0x390
|
|
|
|
ENTRY(efi64_stub_entry)
|
|
|
|
movq %rdi, efi64_config(%rip) /* Handle */
|
|
|
|
movq %rsi, efi64_config+8(%rip) /* EFI System table pointer */
|
|
|
|
|
|
|
|
leaq efi64_config(%rip), %rax
|
|
|
|
movq %rax, efi_config(%rip)
|
|
|
|
|
|
|
|
movq %rdx, %rsi
|
|
|
|
jmp handover_entry
|
|
|
|
ENDPROC(efi64_stub_entry)
|
|
|
|
#endif
|
|
|
|
|
2009-05-08 22:59:13 +00:00
|
|
|
.text
|
2007-05-02 17:27:07 +00:00
|
|
|
relocated:
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
2009-05-08 23:27:41 +00:00
|
|
|
* Clear BSS (stack is currently empty)
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2009-05-08 23:45:15 +00:00
|
|
|
xorl %eax, %eax
|
|
|
|
leaq _bss(%rip), %rdi
|
|
|
|
leaq _ebss(%rip), %rcx
|
2007-05-02 17:27:07 +00:00
|
|
|
subq %rdi, %rcx
|
2009-05-08 23:45:15 +00:00
|
|
|
shrq $3, %rcx
|
|
|
|
rep stosq
|
2007-05-02 17:27:07 +00:00
|
|
|
|
2014-09-23 06:05:49 +00:00
|
|
|
/*
|
|
|
|
* Adjust our own GOT
|
|
|
|
*/
|
|
|
|
leaq _got(%rip), %rdx
|
|
|
|
leaq _egot(%rip), %rcx
|
|
|
|
1:
|
|
|
|
cmpq %rcx, %rdx
|
|
|
|
jae 2f
|
|
|
|
addq %rbx, (%rdx)
|
|
|
|
addq $8, %rdx
|
|
|
|
jmp 1b
|
|
|
|
2:
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* Do the decompression, and jump to the new kernel..
|
|
|
|
*/
|
2009-05-09 00:42:16 +00:00
|
|
|
pushq %rsi /* Save the real mode argument */
|
2014-10-31 13:40:38 +00:00
|
|
|
movq $z_run_size, %r9 /* size of kernel with .bss and .brk */
|
|
|
|
pushq %r9
|
2009-05-09 00:42:16 +00:00
|
|
|
movq %rsi, %rdi /* real mode address */
|
|
|
|
leaq boot_heap(%rip), %rsi /* malloc area for uncompression */
|
|
|
|
leaq input_data(%rip), %rdx /* input_data */
|
|
|
|
movl $z_input_len, %ecx /* input_len */
|
|
|
|
movq %rbp, %r8 /* output target address */
|
2014-10-31 13:40:38 +00:00
|
|
|
movq $z_output_len, %r9 /* decompressed length, end of relocs */
|
2013-10-11 00:18:14 +00:00
|
|
|
call decompress_kernel /* returns kernel location in %rax */
|
2014-10-31 13:40:38 +00:00
|
|
|
popq %r9
|
2007-05-02 17:27:07 +00:00
|
|
|
popq %rsi
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/*
|
2007-05-02 17:27:07 +00:00
|
|
|
* Jump to the decompressed kernel.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2013-10-11 00:18:14 +00:00
|
|
|
jmp *%rax
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2013-01-24 20:20:00 +00:00
|
|
|
.code32
|
|
|
|
no_longmode:
|
|
|
|
/* This isn't an x86-64 CPU so hang */
|
|
|
|
1:
|
|
|
|
hlt
|
|
|
|
jmp 1b
|
|
|
|
|
|
|
|
#include "../../kernel/verify_cpu.S"
|
|
|
|
|
2007-05-02 17:27:07 +00:00
|
|
|
.data
|
|
|
|
gdt:
|
|
|
|
.word gdt_end - gdt
|
|
|
|
.long gdt
|
|
|
|
.word 0
|
|
|
|
.quad 0x0000000000000000 /* NULL descriptor */
|
|
|
|
.quad 0x00af9a000000ffff /* __KERNEL_CS */
|
|
|
|
.quad 0x00cf92000000ffff /* __KERNEL_DS */
|
2007-08-10 20:31:05 +00:00
|
|
|
.quad 0x0080890000000000 /* TS descriptor */
|
|
|
|
.quad 0x0000000000000000 /* TS continued */
|
2007-05-02 17:27:07 +00:00
|
|
|
gdt_end:
|
2008-04-08 10:54:30 +00:00
|
|
|
|
2014-03-05 10:15:55 +00:00
|
|
|
#ifdef CONFIG_EFI_STUB
|
2014-01-10 15:27:14 +00:00
|
|
|
efi_config:
|
|
|
|
.quad 0
|
|
|
|
|
x86/efi: Firmware agnostic handover entry points
The EFI handover code only works if the "bitness" of the firmware and
the kernel match, i.e. 64-bit firmware and 64-bit kernel - it is not
possible to mix the two. This goes against the tradition that a 32-bit
kernel can be loaded on a 64-bit BIOS platform without having to do
anything special in the boot loader. Linux distributions, for one thing,
regularly run only 32-bit kernels on their live media.
Despite having only one 'handover_offset' field in the kernel header,
EFI boot loaders use two separate entry points to enter the kernel based
on the architecture the boot loader was compiled for,
(1) 32-bit loader: handover_offset
(2) 64-bit loader: handover_offset + 512
Since we already have two entry points, we can leverage them to infer
the bitness of the firmware we're running on, without requiring any boot
loader modifications, by making (1) and (2) valid entry points for both
CONFIG_X86_32 and CONFIG_X86_64 kernels.
To be clear, a 32-bit boot loader will always use (1) and a 64-bit boot
loader will always use (2). It's just that, if a single kernel image
supports (1) and (2) that image can be used with both 32-bit and 64-bit
boot loaders, and hence both 32-bit and 64-bit EFI.
(1) and (2) must be 512 bytes apart at all times, but that is already
part of the boot ABI and we could never change that delta without
breaking existing boot loaders anyhow.
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
2014-01-10 15:54:31 +00:00
|
|
|
#ifdef CONFIG_EFI_MIXED
|
|
|
|
.global efi32_config
|
|
|
|
efi32_config:
|
|
|
|
.fill 11,8,0
|
|
|
|
.quad efi64_thunk
|
|
|
|
.byte 0
|
|
|
|
#endif
|
|
|
|
|
2014-01-10 15:27:14 +00:00
|
|
|
.global efi64_config
|
|
|
|
efi64_config:
|
|
|
|
.fill 11,8,0
|
2014-03-27 22:10:39 +00:00
|
|
|
.quad efi_call
|
2014-01-10 15:27:14 +00:00
|
|
|
.byte 1
|
2014-03-05 10:15:55 +00:00
|
|
|
#endif /* CONFIG_EFI_STUB */
|
|
|
|
|
2009-05-08 22:59:13 +00:00
|
|
|
/*
|
|
|
|
* Stack and heap for uncompression
|
|
|
|
*/
|
|
|
|
.bss
|
|
|
|
.balign 4
|
2008-04-08 10:54:30 +00:00
|
|
|
boot_heap:
|
|
|
|
.fill BOOT_HEAP_SIZE, 1, 0
|
|
|
|
boot_stack:
|
|
|
|
.fill BOOT_STACK_SIZE, 1, 0
|
|
|
|
boot_stack_end:
|
2009-05-08 23:20:34 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Space for page tables (not in .bss so not zeroed)
|
|
|
|
*/
|
|
|
|
.section ".pgtable","a",@nobits
|
|
|
|
.balign 4096
|
|
|
|
pgtable:
|
|
|
|
.fill 6*4096, 1, 0
|