#
# arch/arm64/Makefile
#
# This file is included by the global makefile so that you can add your own
# architecture-specific flags and dependencies.
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 1995-2001 by Russell King

LDFLAGS_vmlinux :=--no-undefined -X

ifeq ($(CONFIG_RELOCATABLE), y)
# Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
# for relative relocs, since this leads to better Image compression
# with the relocation offsets always being zero.
LDFLAGS_vmlinux	+= -shared -Bsymbolic -z notext -z norelro \
			$(call ld-option, --no-apply-dynamic-relocs)
endif
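
# Context note (descriptive only): with the flags above, vmlinux is linked
# much like a PIE, so it retains RELA relocation entries. The early boot
# code then applies those entries itself once the kernel's runtime address
# is known; see the self-relocation code in arch/arm64/kernel/head.S.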

ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
  ifeq ($(call ld-option, --fix-cortex-a53-843419),)
$(warning ld does not support --fix-cortex-a53-843419; kernel may be susceptible to erratum)
  else
LDFLAGS_vmlinux	+= --fix-cortex-a53-843419
  endif
endif
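
# Roughly, the ld workaround above rewrites ADRP instructions that fall in
# the last two instruction slots of a 4KiB page (the placement affected by
# Cortex-A53 erratum 843419), converting them to ADR where the target is in
# range or routing them through a veneer otherwise. (Summary for context;
# see the binutils documentation for the exact behaviour.)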

# We never want expected sections to be placed heuristically by the
# linker. All sections should be explicitly named in the linker script.
LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn)
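
# With --orphan-handling=warn, a section missing from the linker script
# produces a diagnostic along these lines (illustrative output with a
# hypothetical section/object; exact wording varies by linker version):
#
#   aarch64-linux-gnu-ld: warning: orphan section `.foo' from `bar.o'
#   being placed in section `.foo'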

ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS), y)
  ifneq ($(CONFIG_ARM64_LSE_ATOMICS), y)
$(warning LSE atomics not supported by binutils)
  endif
endif

cc_has_k_constraint := $(call try-run,echo				\
	'int main(void) {						\
		asm volatile("and w0, w0, %w0" :: "K" (4294967295));	\
		return 0;						\
	}' | $(CC) -S -x c -o "$$TMP" -,,-DCONFIG_CC_HAS_K_CONSTRAINT=1)
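
# The try-run probe above compiles a one-line C program that uses the "K"
# constraint (a 32-bit logical-immediate asm operand); only if $(CC) accepts
# it does CONFIG_CC_HAS_K_CONSTRAINT=1 get added to the flags. Roughly the
# same check done by hand (sketch, assuming an aarch64-linux-gnu- toolchain):
#
#   $ echo 'int main(void) { asm("and w0, w0, %w0" :: "K" (4294967295)); }' \
#       | aarch64-linux-gnu-gcc -S -x c -o /dev/null - && echo supported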

ifeq ($(CONFIG_BROKEN_GAS_INST),y)
$(warning Detected assembler with broken .inst; disassembly will be unreliable)
endif

KBUILD_CFLAGS	+= -mgeneral-regs-only	\
		   $(compat_vdso) $(cc_has_k_constraint)
KBUILD_CFLAGS	+= $(call cc-disable-warning, psabi)
KBUILD_AFLAGS	+= $(compat_vdso)

KBUILD_CFLAGS	+= $(call cc-option,-mabi=lp64)
KBUILD_AFLAGS	+= $(call cc-option,-mabi=lp64)

# Avoid generating .eh_frame* sections.
KBUILD_CFLAGS	+= -fno-asynchronous-unwind-tables -fno-unwind-tables
KBUILD_AFLAGS	+= -fno-asynchronous-unwind-tables -fno-unwind-tables

ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
prepare: stack_protector_prepare
stack_protector_prepare: prepare0
	$(eval KBUILD_CFLAGS += -mstack-protector-guard=sysreg		  \
				-mstack-protector-guard-reg=sp_el0	  \
				-mstack-protector-guard-offset=$(shell	  \
			awk '{if ($$2 == "TSK_STACK_CANARY") print $$3;}' \
					include/generated/asm-offsets.h))
endif
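
# The awk invocation above pulls the per-task canary offset out of the
# generated asm-offsets.h, whose entries look like this (the offset value
# here is illustrative):
#
#   #define TSK_STACK_CANARY 888 /* offsetof(struct task_struct, stack_canary) */
#
# awk matches $2 == "TSK_STACK_CANARY" and prints $3, the offset.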

# Ensure that if the compiler supports branch protection we default it
# off; this will be overridden if we are using branch protection.
branch-prot-flags-y += $(call cc-option,-mbranch-protection=none)

ifeq ($(CONFIG_ARM64_PTR_AUTH),y)
branch-prot-flags-$(CONFIG_CC_HAS_SIGN_RETURN_ADDRESS) := -msign-return-address=all
# We enable additional protection for leaf functions as there is some
# narrow potential for ROP protection benefits and no substantial
# performance impact has been observed.
ifeq ($(CONFIG_ARM64_BTI_KERNEL),y)
branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI) := -mbranch-protection=pac-ret+leaf+bti
else
branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET) := -mbranch-protection=pac-ret+leaf
endif
# -march=armv8.3-a enables the non-NOP PAC instructions. To keep the
# compiler from generating them (which would break the single-image
# contract), we pass it only to the assembler. This option is only used
# with non-integrated assemblers.
ifeq ($(CONFIG_AS_HAS_PAC), y)
asm-arch := armv8.3-a
endif
endif

KBUILD_CFLAGS += $(branch-prot-flags-y)
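
# For reference: -mbranch-protection=pac-ret makes the compiler sign the
# return address in the prologue and authenticate it before returning, and
# "+bti" additionally places "bti c" landing pads at indirect-branch
# targets. Illustrative codegen (exact sequences vary by compiler):
#
#   func:
#           paciasp                 // sign LR against SP
#           ...
#           autiasp                 // authenticate LR before returning
#           ret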

ifeq ($(CONFIG_AS_HAS_ARMV8_4), y)
# make sure to pass the newest target architecture to -march.
asm-arch := armv8.4-a
endif

ifdef asm-arch
KBUILD_CFLAGS	+= -Wa,-march=$(asm-arch) \
		   -DARM64_ASM_ARCH='"$(asm-arch)"'
endif

ifeq ($(CONFIG_SHADOW_CALL_STACK), y)
KBUILD_CFLAGS	+= -ffixed-x18
endif
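
# Clang's Shadow Call Stack uses x18 as the shadow stack pointer, so the
# compiler must be told never to allocate it. Instrumented functions then
# save and restore the return address on the shadow stack, roughly:
#
#   str x30, [x18], #8      // push return address on function entry
#   ...
#   ldr x30, [x18, #-8]!    // pop it back before ret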

ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
KBUILD_CPPFLAGS	+= -mbig-endian
CHECKFLAGS	+= -D__AARCH64EB__
# Prefer the baremetal ELF build target, but not all toolchains include
# it so fall back to the standard linux version if needed.
KBUILD_LDFLAGS	+= -EB $(call ld-option, -maarch64elfb, -maarch64linuxb)
UTS_MACHINE	:= aarch64_be
else
KBUILD_CPPFLAGS	+= -mlittle-endian
CHECKFLAGS	+= -D__AARCH64EL__
# Same as above, prefer ELF but fall back to linux target if needed.
KBUILD_LDFLAGS	+= -EL $(call ld-option, -maarch64elf, -maarch64linux)
UTS_MACHINE	:= aarch64
endif

CHECKFLAGS	+= -D__aarch64__

ifeq ($(CONFIG_ARM64_MODULE_PLTS),y)
KBUILD_LDS_MODULE	+= $(srctree)/arch/arm64/kernel/module.lds
endif

ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_REGS),y)
  KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
  CC_FLAGS_FTRACE := -fpatchable-function-entry=2
endif
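
# -fpatchable-function-entry=2 reserves two NOPs at each function entry;
# at runtime ftrace can rewrite them into a call to its trampoline.
# Illustrative before/after patching:
#
#   func:                          func:
#           nop            ->              mov x9, x30
#           nop            ->              bl  ftrace_caller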

# Default value
head-y		:= arch/arm64/kernel/head.o

ifeq ($(CONFIG_KASAN_SW_TAGS), y)
KASAN_SHADOW_SCALE_SHIFT := 4
else
KASAN_SHADOW_SCALE_SHIFT := 3
endif

KBUILD_CFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
KBUILD_CPPFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
KBUILD_AFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
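
# The scale shift sets how much memory each KASAN shadow byte covers:
# 2^KASAN_SHADOW_SCALE_SHIFT bytes, i.e. 8 bytes for generic KASAN and 16
# for software tags. The usual shadow address translation is:
#
#   shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET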

core-y		+= arch/arm64/
libs-y		:= arch/arm64/lib/ $(libs-y)
libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a

# Default target when executing plain make
boot		:= arch/arm64/boot
KBUILD_IMAGE	:= $(boot)/Image.gz

all:	Image.gz
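
# A typical cross-build invocation for the default target (the toolchain
# prefix is illustrative):
#
#   $ make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- Image.gz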

Image: vmlinux
	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@

Image.%: Image
	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@

zinstall install:
	$(Q)$(MAKE) $(build)=$(boot) $@

PHONY += vdso_install
vdso_install:
	$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
	$(if $(CONFIG_COMPAT_VDSO), \
		$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@)

# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
	$(Q)$(MAKE) $(clean)=$(boot)

ifeq ($(KBUILD_EXTMOD),)
# We need to generate vdso-offsets.h before compiling certain files in kernel/.
# In order to do that, we should use the archprepare target, but we can't since
# asm-offsets.h is included in some files used to generate vdso-offsets.h, and
# asm-offsets.h is built in prepare0, for which archprepare is a dependency.
# Therefore we need to generate the header after prepare0 has been made, hence
# this hack.
prepare: vdso_prepare
vdso_prepare: prepare0
	$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso include/generated/vdso-offsets.h
	$(if $(CONFIG_COMPAT_VDSO),$(Q)$(MAKE) \
		$(build)=arch/arm64/kernel/vdso32 \
		include/generated/vdso32-offsets.h)
endif

define archhelp
  echo  '* Image.gz      - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
  echo  '  Image         - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
  echo  '  install       - Install uncompressed kernel'
  echo  '  zinstall      - Install compressed kernel'
  echo  '                  Install using (your) ~/bin/installkernel or'
  echo  '                  (distribution) /sbin/installkernel or'
  echo  '                  install to $$(INSTALL_PATH) and run lilo'
endef