#
# arch/arm64/Makefile
#
# This file is included by the global makefile so that you can add your own
# architecture-specific flags and dependencies.
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 1995-2001 by Russell King
LDFLAGS_vmlinux :=-p --no-undefined -X
CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
GZFLAGS :=-9
ifneq ($(CONFIG_RELOCATABLE),)
# Symbol preemption does not apply to vmlinux, so bind internal references
# with -Bsymbolic: this suppresses symbol-based R_AARCH64_ABS64 relocations
# in the PIE binary and leaves only anonymous R_AARCH64_RELATIVE offset
# fixups, which simplifies the runtime relocation code.
LDFLAGS_vmlinux += -pie -Bsymbolic
endif
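# A quick way to confirm the effect by hand (illustrative, not build logic):
#   $ readelf -r vmlinux | grep R_AARCH64_ABS64
# should print nothing once -Bsymbolic is in effect; only R_AARCH64_RELATIVE
# fixups remain.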
ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
ifeq ($(call ld-option, --fix-cortex-a53-843419),)
$(warning ld does not support --fix-cortex-a53-843419; kernel may be susceptible to erratum)
else
LDFLAGS_vmlinux += --fix-cortex-a53-843419
endif
endif
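# ld-option only emits the flag when $(LD) accepts it. A by-hand probe of the
# toolchain might look like this (illustrative; mirrors how ld-option tests
# the linker):
#   $ aarch64-linux-gnu-ld --fix-cortex-a53-843419 -v && echo supported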
KBUILD_DEFCONFIG := defconfig
# Check for binutils support for specific extensions
lseinstr := $(call as-instr,.arch_extension lse,-DCONFIG_AS_LSE=1)
ifeq ($(CONFIG_ARM64_LSE_ATOMICS), y)
ifeq ($(lseinstr),)
$(warning LSE atomics not supported by binutils)
endif
endif
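# as-instr works the same way for the assembler; an equivalent manual check
# (illustrative only):
#   $ echo '.arch_extension lse' | aarch64-linux-gnu-as -o /dev/null -
# exits 0 only when binutils understands the LSE extension.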
KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr)
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads)
KBUILD_AFLAGS += $(lseinstr)
ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
KBUILD_CPPFLAGS += -mbig-endian
AS += -EB
LD += -EB
UTS_MACHINE := aarch64_be
else
KBUILD_CPPFLAGS += -mlittle-endian
AS += -EL
LD += -EL
UTS_MACHINE := aarch64
endif
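# The endianness of the result can be sanity-checked afterwards (illustrative):
# `file vmlinux` reports "ELF 64-bit MSB" for aarch64_be builds, "LSB" otherwise.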
CHECKFLAGS += -D__aarch64__
ifeq ($(CONFIG_ARM64_MODULE_CMODEL_LARGE), y)
KBUILD_CFLAGS_MODULE += -mcmodel=large
endif
ifeq ($(CONFIG_ARM64_MODULE_PLTS),y)
KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/arm64/kernel/module.lds
endif
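# Background note: module.lds reserves PLT sections so that module branches
# landing outside the +/-128 MiB direct-branch range of AArch64 b/bl
# instructions can be routed through veneers at module load time.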
# Default value
head-y := arch/arm64/kernel/head.o
# The byte offset of the kernel image in RAM from the start of RAM.
ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
# When randomized, the offset must stay aligned to the kernel page size:
# _text is used to map the .text segment, and map_kernel_segment() requires
# page-aligned addresses.
TEXT_OFFSET := $(shell awk "BEGIN {srand(); printf \"0x%06x\n\", \
int(2 * 1024 * 1024 / (2 ^ $(CONFIG_ARM64_PAGE_SHIFT)) * \
rand()) * (2 ^ $(CONFIG_ARM64_PAGE_SHIFT))}")
else
TEXT_OFFSET := 0x00080000
endif
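# Worked example (assuming 4 KiB pages, CONFIG_ARM64_PAGE_SHIFT=12): awk
# computes int(2 MiB / 4096 * rand()), i.e. 0..511, and scales it back up by
# 4096, so TEXT_OFFSET becomes a random page-aligned value below 0x200000,
# e.g. 0x07a000. With 64 KiB pages it is a multiple of 0x10000 instead.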
# KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - 3)) - (1 << 61)
# in 32-bit arithmetic
KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \
(0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 32))) \
+ (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - 3)) \
- (1 << (64 - 32 - 3)) )) )
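# Worked example (assuming CONFIG_ARM64_VA_BITS=48):
#   (0xffffffff & (-1 << 16)) = 0xffff0000
#   + (1 << 13)               = 0xffff2000
#   - (1 << 29)               = 0xdfff2000
# yielding KASAN_SHADOW_OFFSET = 0xdfff200000000000.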
export TEXT_OFFSET GZFLAGS
core-y += arch/arm64/kernel/ arch/arm64/mm/
core-$(CONFIG_NET) += arch/arm64/net/
core-$(CONFIG_KVM) += arch/arm64/kvm/
core-$(CONFIG_XEN) += arch/arm64/xen/
core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
libs-y := arch/arm64/lib/ $(libs-y)
core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
# Default target when executing plain make
KBUILD_IMAGE := Image.gz
KBUILD_DTBS := dtbs
all: $(KBUILD_IMAGE) $(KBUILD_DTBS)
boot := arch/arm64/boot
Image: vmlinux
	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@

Image.%: Image
	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
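# Illustrative usage: `make ARCH=arm64 Image.gz` matches the Image.% pattern,
# builds Image first, then delegates the compression step to
# arch/arm64/boot/Makefile.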
zinstall install:
	$(Q)$(MAKE) $(build)=$(boot) $@

%.dtb: scripts
	$(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@
PHONY += dtbs dtbs_install
dtbs: prepare scripts
	$(Q)$(MAKE) $(build)=$(boot)/dts

dtbs_install:
	$(Q)$(MAKE) $(dtbinst)=$(boot)/dts
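# Illustrative usage (the destination path is just an example):
#   $ make ARCH=arm64 dtbs_install INSTALL_DTBS_PATH=/srv/tftp/dtbs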
PHONY += vdso_install
vdso_install:
	$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
	$(Q)$(MAKE) $(clean)=$(boot)
	$(Q)$(MAKE) $(clean)=$(boot)/dts
# We need to generate vdso-offsets.h before compiling certain files in kernel/.
# In order to do that, we should use the archprepare target, but we can't since
# asm-offsets.h is included in some files used to generate vdso-offsets.h, and
# asm-offsets.h is built in prepare0, for which archprepare is a dependency.
# Therefore we need to generate the header after prepare0 has been made, hence
# this hack.
prepare: vdso_prepare
vdso_prepare: prepare0
	$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso include/generated/vdso-offsets.h
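# Resulting order, as a sketch of the chain above: prepare0 builds
# asm-offsets.h, vdso_prepare then generates include/generated/vdso-offsets.h,
# and only afterwards does the generic prepare target proceed.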
define archhelp
  echo  '* Image.gz      - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
  echo  '  Image         - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
  echo  '* dtbs          - Build device tree blobs for enabled boards'
  echo  '  dtbs_install  - Install dtbs to $(INSTALL_DTBS_PATH)'
  echo  '  install       - Install uncompressed kernel'
  echo  '  zinstall      - Install compressed kernel'
  echo  '                  Install using (your) ~/bin/installkernel or'
  echo  '                  (distribution) /sbin/installkernel or'
  echo  '                  install to $$(INSTALL_PATH) and run lilo'
endef